{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4118643999099731,
"min": 1.334549069404602,
"max": 3.2957675457000732,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 28689.083984375,
"min": 16186.33203125,
"max": 140877.703125,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 70.73239436619718,
"min": 44.14545454545455,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20088.0,
"min": 14016.0,
"max": 25436.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1757.1116673539389,
"min": 1196.4770090911709,
"max": 1819.6628678182683,
"count": 3779
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 249509.8567642593,
"min": 2392.9540181823418,
"max": 381723.31539122155,
"count": 3779
},
"SoccerTwos.Step.mean": {
"value": 49999992.0,
"min": 9734.0,
"max": 49999992.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999992.0,
"min": 9734.0,
"max": 49999992.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.039098095148801804,
"min": -0.13506445288658142,
"max": 0.21278829872608185,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -5.512831211090088,
"min": -23.636280059814453,
"max": 24.306732177734375,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.04226994141936302,
"min": -0.1335238367319107,
"max": 0.21495413780212402,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -5.960061550140381,
"min": -23.366670608520508,
"max": 24.093002319335938,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.051009929349236455,
"min": -0.5,
"max": 0.7483039258741865,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 7.19240003824234,
"min": -68.52999997138977,
"max": 76.32700043916702,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.051009929349236455,
"min": -0.5,
"max": 0.7483039258741865,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 7.19240003824234,
"min": -68.52999997138977,
"max": 76.32700043916702,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01817262122640386,
"min": 0.010402236335600416,
"max": 0.0282656139228493,
"count": 2387
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01817262122640386,
"min": 0.010402236335600416,
"max": 0.0282656139228493,
"count": 2387
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09874455779790878,
"min": 3.9828985306615085e-11,
"max": 0.11980765188733737,
"count": 2387
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09874455779790878,
"min": 3.9828985306615085e-11,
"max": 0.11980765188733737,
"count": 2387
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10047606552640596,
"min": 6.236301995643511e-11,
"max": 0.12174425944685936,
"count": 2387
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10047606552640596,
"min": 6.236301995643511e-11,
"max": 0.12174425944685936,
"count": 2387
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2387
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2387
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2387
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2387
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2387
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2387
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679462454",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:39:03) \n[GCC 11.3.0]",
"command_line_arguments": "/media/hd/work/hf-rl-u7/_conda/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679553317"
},
"total": 90863.07966329798,
"count": 1,
"self": 0.32276201294735074,
"children": {
"run_training.setup": {
"total": 0.011372299981303513,
"count": 1,
"self": 0.011372299981303513
},
"TrainerController.start_learning": {
"total": 90862.74552898505,
"count": 1,
"self": 75.61205475765746,
"children": {
"TrainerController._reset_env": {
"total": 12.571995195932686,
"count": 250,
"self": 12.571995195932686
},
"TrainerController.advance": {
"total": 90774.25577048131,
"count": 3382945,
"self": 69.5784866772592,
"children": {
"env_step": {
"total": 70906.86568981281,
"count": 3382945,
"self": 56689.131164202816,
"children": {
"SubprocessEnvManager._take_step": {
"total": 14170.955866511795,
"count": 3382945,
"self": 408.64297854085453,
"children": {
"TorchPolicy.evaluate": {
"total": 13762.31288797094,
"count": 6333988,
"self": 13762.31288797094
}
}
},
"workers": {
"total": 46.77865909819957,
"count": 3382945,
"self": 0.0,
"children": {
"worker_root": {
"total": 90730.07692549855,
"count": 3382945,
"is_parallel": true,
"self": 42695.56536559318,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022973798913881183,
"count": 2,
"is_parallel": true,
"self": 0.0005886418512091041,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017087380401790142,
"count": 8,
"is_parallel": true,
"self": 0.0017087380401790142
}
}
},
"UnityEnvironment.step": {
"total": 0.0332693139789626,
"count": 1,
"is_parallel": true,
"self": 0.0005849449662491679,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0009517880389466882,
"count": 1,
"is_parallel": true,
"self": 0.0009517880389466882
},
"communicator.exchange": {
"total": 0.02957025996875018,
"count": 1,
"is_parallel": true,
"self": 0.02957025996875018
},
"steps_from_proto": {
"total": 0.0021623210050165653,
"count": 2,
"is_parallel": true,
"self": 0.00044048589188605547,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017218351131305099,
"count": 8,
"is_parallel": true,
"self": 0.0017218351131305099
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 48033.7887676236,
"count": 3382944,
"is_parallel": true,
"self": 2896.6152998108882,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2069.945759323309,
"count": 3382944,
"is_parallel": true,
"self": 2069.945759323309
},
"communicator.exchange": {
"total": 34645.29802252329,
"count": 3382944,
"is_parallel": true,
"self": 34645.29802252329
},
"steps_from_proto": {
"total": 8421.929685966112,
"count": 6765888,
"is_parallel": true,
"self": 1677.9999219384044,
"children": {
"_process_rank_one_or_two_observation": {
"total": 6743.929764027707,
"count": 27063552,
"is_parallel": true,
"self": 6743.929764027707
}
}
}
}
},
"steps_from_proto": {
"total": 0.7227922817692161,
"count": 498,
"is_parallel": true,
"self": 0.14150283834896982,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.5812894434202462,
"count": 1992,
"is_parallel": true,
"self": 0.5812894434202462
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 19797.81159399124,
"count": 3382945,
"self": 501.774446016876,
"children": {
"process_trajectory": {
"total": 6671.715125667397,
"count": 3382945,
"self": 6639.40557174827,
"children": {
"RLTrainer._checkpoint": {
"total": 32.30955391912721,
"count": 100,
"self": 32.30955391912721
}
}
},
"_update_policy": {
"total": 12624.322022306966,
"count": 2387,
"self": 7249.404454407631,
"children": {
"TorchPOCAOptimizer.update": {
"total": 5374.917567899334,
"count": 71610,
"self": 5374.917567899334
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.580958142876625e-07,
"count": 1,
"self": 6.580958142876625e-07
},
"TrainerController._save_models": {
"total": 0.3057078920537606,
"count": 1,
"self": 0.001581975957378745,
"children": {
"RLTrainer._checkpoint": {
"total": 0.30412591609638184,
"count": 1,
"self": 0.30412591609638184
}
}
}
}
}
}
}