poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1690661907196045,
"min": 3.151979923248291,
"max": 3.2956528663635254,
"count": 105
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 21904.5859375,
"min": 18057.224609375,
"max": 122713.40625,
"count": 105
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 485.4,
"max": 999.0,
"count": 105
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 3996.0,
"max": 29120.0,
"count": 105
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1203.8460950704746,
"min": 1188.2073283492277,
"max": 1203.8460950704746,
"count": 80
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2407.692190140949,
"min": 2376.4146566984555,
"max": 16749.3737071215,
"count": 80
},
"SoccerTwos.Step.mean": {
"value": 1049202.0,
"min": 9564.0,
"max": 1049202.0,
"count": 105
},
"SoccerTwos.Step.sum": {
"value": 1049202.0,
"min": 9564.0,
"max": 1049202.0,
"count": 105
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.009214774705469608,
"min": -0.014021429233253002,
"max": 0.057640738785266876,
"count": 105
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.09214774519205093,
"min": -0.20137549936771393,
"max": 0.7492830753326416,
"count": 105
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.008726662024855614,
"min": -0.012422624044120312,
"max": 0.057669639587402344,
"count": 105
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.08726661652326584,
"min": -0.19789908826351166,
"max": 0.7497053146362305,
"count": 105
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 105
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 105
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.5,
"max": 0.2883428548063551,
"count": 105
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -6.0,
"max": 4.195999920368195,
"count": 105
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.5,
"max": 0.2883428548063551,
"count": 105
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -6.0,
"max": 4.195999920368195,
"count": 105
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 105
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 105
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016605024497645598,
"min": 0.012622617522720248,
"max": 0.02128681237033258,
"count": 48
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016605024497645598,
"min": 0.012622617522720248,
"max": 0.02128681237033258,
"count": 48
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0010955231729894877,
"min": 7.611720519662412e-07,
"max": 0.005668452560591201,
"count": 48
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0010955231729894877,
"min": 7.611720519662412e-07,
"max": 0.005668452560591201,
"count": 48
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0010903837983884538,
"min": 8.060076709170972e-07,
"max": 0.005666462937369943,
"count": 48
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0010903837983884538,
"min": 8.060076709170972e-07,
"max": 0.005666462937369943,
"count": 48
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 48
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 48
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 48
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 48
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 48
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 48
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685217865",
"python_version": "3.9.16 (main, Mar 8 2023, 04:29:44) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/amantyagi/opt/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1685225828"
},
"total": 7963.3778710510005,
"count": 1,
"self": 0.216018621999865,
"children": {
"run_training.setup": {
"total": 0.09559221399999984,
"count": 1,
"self": 0.09559221399999984
},
"TrainerController.start_learning": {
"total": 7963.0662602150005,
"count": 1,
"self": 1.241843105949556,
"children": {
"TrainerController._reset_env": {
"total": 3.8528853860003203,
"count": 6,
"self": 3.8528853860003203
},
"TrainerController.advance": {
"total": 7957.802629458051,
"count": 68748,
"self": 1.371224475060444,
"children": {
"env_step": {
"total": 5444.3411687190255,
"count": 68748,
"self": 5268.483743151881,
"children": {
"SubprocessEnvManager._take_step": {
"total": 175.00287557308133,
"count": 68748,
"self": 6.7686340449253635,
"children": {
"TorchPolicy.evaluate": {
"total": 168.23424152815596,
"count": 136556,
"self": 168.23424152815596
}
}
},
"workers": {
"total": 0.854549994063051,
"count": 68747,
"self": 0.0,
"children": {
"worker_root": {
"total": 7957.366129595119,
"count": 68747,
"is_parallel": true,
"self": 2855.243861068111,
"children": {
"steps_from_proto": {
"total": 0.01584365599785853,
"count": 12,
"is_parallel": true,
"self": 0.0022737569952404613,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.013569899002618069,
"count": 48,
"is_parallel": true,
"self": 0.013569899002618069
}
}
},
"UnityEnvironment.step": {
"total": 5102.10642487101,
"count": 68747,
"is_parallel": true,
"self": 12.55071948098157,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 97.95879548593122,
"count": 68747,
"is_parallel": true,
"self": 97.95879548593122
},
"communicator.exchange": {
"total": 4816.78395650804,
"count": 68747,
"is_parallel": true,
"self": 4816.78395650804
},
"steps_from_proto": {
"total": 174.81295339605614,
"count": 137494,
"is_parallel": true,
"self": 24.495120769899188,
"children": {
"_process_rank_one_or_two_observation": {
"total": 150.31783262615696,
"count": 549976,
"is_parallel": true,
"self": 150.31783262615696
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2512.0902362639654,
"count": 68747,
"self": 9.464015075838233,
"children": {
"process_trajectory": {
"total": 351.12678933712414,
"count": 68747,
"self": 350.740826840125,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3859624969991273,
"count": 2,
"self": 0.3859624969991273
}
}
},
"_update_policy": {
"total": 2151.499431851003,
"count": 48,
"self": 144.52101408301087,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2006.9784177679921,
"count": 1458,
"self": 2006.9784177679921
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1489992175484076e-06,
"count": 1,
"self": 1.1489992175484076e-06
},
"TrainerController._save_models": {
"total": 0.16890111599968805,
"count": 1,
"self": 0.004239751999193686,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16466136400049436,
"count": 1,
"self": 0.16466136400049436
}
}
}
}
}
}
}