{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.0496420860290527,
"min": 1.9699488878250122,
"max": 3.2957611083984375,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 40140.19140625,
"min": 24006.47265625,
"max": 114063.3125,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 64.9054054054054,
"min": 44.41284403669725,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19212.0,
"min": 13220.0,
"max": 29168.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1508.2401412097033,
"min": 1197.2503267915763,
"max": 1531.330232998366,
"count": 493
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 223219.5408990361,
"min": 2394.5006535831526,
"max": 319620.43788398313,
"count": 493
},
"SoccerTwos.Step.mean": {
"value": 4999923.0,
"min": 9600.0,
"max": 4999923.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999923.0,
"min": 9600.0,
"max": 4999923.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.049563899636268616,
"min": -0.11589053273200989,
"max": 0.15836215019226074,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -7.385021209716797,
"min": -17.47100067138672,
"max": 24.229408264160156,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.04883383587002754,
"min": -0.11587277054786682,
"max": 0.1570761501789093,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -7.276241302490234,
"min": -16.950939178466797,
"max": 24.032649993896484,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.052849665184149006,
"min": -0.6153846153846154,
"max": 0.5347857155970165,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -7.874600112438202,
"min": -45.040000200271606,
"max": 43.72359982132912,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.052849665184149006,
"min": -0.6153846153846154,
"max": 0.5347857155970165,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -7.874600112438202,
"min": -45.040000200271606,
"max": 43.72359982132912,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016398119295869644,
"min": 0.011055969167500734,
"max": 0.024588115167959283,
"count": 240
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016398119295869644,
"min": 0.011055969167500734,
"max": 0.024588115167959283,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08516770402590433,
"min": 7.862386361618216e-05,
"max": 0.10642808551589648,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08516770402590433,
"min": 7.862386361618216e-05,
"max": 0.10642808551589648,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08607020055254301,
"min": 7.850312686059624e-05,
"max": 0.1089834230641524,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08607020055254301,
"min": 7.850312686059624e-05,
"max": 0.1089834230641524,
"count": 240
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1705676443",
"python_version": "3.10.12 (main, Jul 5 2023, 15:34:07) [Clang 14.0.6 ]",
"command_line_arguments": "/Users/weixia/anaconda3/envs/mlagents/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --run-id=SoccerTwos --no-graphics --env=./training-envs-executables/SoccerTwos/SoccerTwos",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2",
"numpy_version": "1.23.5",
"end_time_seconds": "1705715930"
},
"total": 39486.13023382297,
"count": 1,
"self": 0.36589545098831877,
"children": {
"run_training.setup": {
"total": 0.3835012780036777,
"count": 1,
"self": 0.3835012780036777
},
"TrainerController.start_learning": {
"total": 39485.38083709398,
"count": 1,
"self": 8.156851188279688,
"children": {
"TrainerController._reset_env": {
"total": 9.28777388803428,
"count": 25,
"self": 9.28777388803428
},
"TrainerController.advance": {
"total": 39467.70460541861,
"count": 339528,
"self": 8.637577339017298,
"children": {
"env_step": {
"total": 31376.298405682144,
"count": 339528,
"self": 30311.752292448247,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1059.2864602305926,
"count": 339528,
"self": 47.3477015644894,
"children": {
"TorchPolicy.evaluate": {
"total": 1011.9387586661032,
"count": 632088,
"self": 1011.9387586661032
}
}
},
"workers": {
"total": 5.259653003304265,
"count": 339528,
"self": 0.0,
"children": {
"worker_root": {
"total": 39463.83894943661,
"count": 339528,
"is_parallel": true,
"self": 10071.512773024151,
"children": {
"steps_from_proto": {
"total": 0.06006579514360055,
"count": 50,
"is_parallel": true,
"self": 0.010921472101472318,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.049144323042128235,
"count": 200,
"is_parallel": true,
"self": 0.049144323042128235
}
}
},
"UnityEnvironment.step": {
"total": 29392.26611061732,
"count": 339528,
"is_parallel": true,
"self": 101.78308359050425,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 517.6364895065199,
"count": 339528,
"is_parallel": true,
"self": 517.6364895065199
},
"communicator.exchange": {
"total": 27940.528813199024,
"count": 339528,
"is_parallel": true,
"self": 27940.528813199024
},
"steps_from_proto": {
"total": 832.3177243212704,
"count": 679056,
"is_parallel": true,
"self": 151.60361823969288,
"children": {
"_process_rank_one_or_two_observation": {
"total": 680.7141060815775,
"count": 2716224,
"is_parallel": true,
"self": 680.7141060815775
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8082.768622397445,
"count": 339528,
"self": 60.456031884707045,
"children": {
"process_trajectory": {
"total": 1090.7197146839462,
"count": 339528,
"self": 1088.320764566015,
"children": {
"RLTrainer._checkpoint": {
"total": 2.3989501179312356,
"count": 10,
"self": 2.3989501179312356
}
}
},
"_update_policy": {
"total": 6931.592875828792,
"count": 240,
"self": 712.5933588306652,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6218.999516998127,
"count": 7200,
"self": 6218.999516998127
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.860377758741379e-07,
"count": 1,
"self": 9.860377758741379e-07
},
"TrainerController._save_models": {
"total": 0.23160561302211136,
"count": 1,
"self": 0.0013614699710160494,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2302441430510953,
"count": 1,
"self": 0.2302441430510953
}
}
}
}
}
}
}