{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.240445375442505,
"min": 3.2071869373321533,
"max": 3.2956974506378174,
"count": 27
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 45003.3046875,
"min": 22694.2578125,
"max": 105462.3125,
"count": 27
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 726.5,
"min": 525.875,
"max": 999.0,
"count": 27
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 17436.0,
"min": 16828.0,
"max": 24800.0,
"count": 27
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1193.7578446427165,
"min": 1191.414963979109,
"max": 1197.5518243297993,
"count": 26
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4775.031378570866,
"min": 2384.2387313629906,
"max": 19143.40770044412,
"count": 26
},
"SoccerTwos.Step.mean": {
"value": 269976.0,
"min": 9420.0,
"max": 269976.0,
"count": 27
},
"SoccerTwos.Step.sum": {
"value": 269976.0,
"min": 9420.0,
"max": 269976.0,
"count": 27
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.014902826398611069,
"min": -0.02239292860031128,
"max": 0.0017403720412403345,
"count": 27
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.1937367469072342,
"min": -0.35947346687316895,
"max": 0.029586324468255043,
"count": 27
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.011990911327302456,
"min": -0.014891309663653374,
"max": -0.002565356669947505,
"count": 27
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.1558818519115448,
"min": -0.23826095461845398,
"max": -0.0343497060239315,
"count": 27
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 27
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 27
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.042861534998967096,
"min": -0.6866823504952824,
"max": 0.08849411852219526,
"count": 27
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.5571999549865723,
"min": -11.6735999584198,
"max": 1.5044000148773193,
"count": 27
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.042861534998967096,
"min": -0.6866823504952824,
"max": 0.08849411852219526,
"count": 27
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.5571999549865723,
"min": -11.6735999584198,
"max": 1.5044000148773193,
"count": 27
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 27
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 27
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.010117768005390342,
"min": 0.010117768005390342,
"max": 0.01865373881882988,
"count": 12
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.010117768005390342,
"min": 0.010117768005390342,
"max": 0.01865373881882988,
"count": 12
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.003935955084549884,
"min": 0.001508021573924149,
"max": 0.007247587277864416,
"count": 12
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.003935955084549884,
"min": 0.001508021573924149,
"max": 0.007247587277864416,
"count": 12
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.003937759677258631,
"min": 0.0015174899871150652,
"max": 0.007255641728018721,
"count": 12
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.003937759677258631,
"min": 0.0015174899871150652,
"max": 0.007255641728018721,
"count": 12
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 12
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 12
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 12
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 12
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 12
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 12
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1731312250",
"python_version": "3.10.12 (main, Jul 5 2023, 15:02:25) [Clang 14.0.6 ]",
"command_line_arguments": "/opt/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1",
"numpy_version": "1.23.5",
"end_time_seconds": "1731313487"
},
"total": 1236.545987957972,
"count": 1,
"self": 0.004797665984369814,
"children": {
"run_training.setup": {
"total": 0.01639762509148568,
"count": 1,
"self": 0.01639762509148568
},
"TrainerController.start_learning": {
"total": 1236.5247926668962,
"count": 1,
"self": 0.22838388779200613,
"children": {
"TrainerController._reset_env": {
"total": 4.5475917919538915,
"count": 2,
"self": 4.5475917919538915
},
"TrainerController.advance": {
"total": 1231.0880667380989,
"count": 17821,
"self": 0.21842296177055687,
"children": {
"env_step": {
"total": 992.9914524647174,
"count": 17821,
"self": 954.8745469633723,
"children": {
"SubprocessEnvManager._take_step": {
"total": 37.965532288653776,
"count": 17821,
"self": 1.1281631885794923,
"children": {
"TorchPolicy.evaluate": {
"total": 36.837369100074284,
"count": 35360,
"self": 36.837369100074284
}
}
},
"workers": {
"total": 0.15137321269139647,
"count": 17820,
"self": 0.0,
"children": {
"worker_root": {
"total": 1231.8887167989742,
"count": 17820,
"is_parallel": true,
"self": 306.9543487156043,
"children": {
"steps_from_proto": {
"total": 0.00428250094410032,
"count": 4,
"is_parallel": true,
"self": 0.0006960829487070441,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003586417995393276,
"count": 16,
"is_parallel": true,
"self": 0.003586417995393276
}
}
},
"UnityEnvironment.step": {
"total": 924.9300855824258,
"count": 17820,
"is_parallel": true,
"self": 2.6337445022072643,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 15.928510103025474,
"count": 17820,
"is_parallel": true,
"self": 15.928510103025474
},
"communicator.exchange": {
"total": 872.163203357486,
"count": 17820,
"is_parallel": true,
"self": 872.163203357486
},
"steps_from_proto": {
"total": 34.204627619707026,
"count": 35640,
"is_parallel": true,
"self": 3.7891682606423274,
"children": {
"_process_rank_one_or_two_observation": {
"total": 30.415459359064698,
"count": 142560,
"is_parallel": true,
"self": 30.415459359064698
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 237.87819131161086,
"count": 17820,
"self": 2.284961257711984,
"children": {
"process_trajectory": {
"total": 34.421652843710035,
"count": 17820,
"self": 34.421652843710035
},
"_update_policy": {
"total": 201.17157721018884,
"count": 12,
"self": 19.691816289327107,
"children": {
"TorchPOCAOptimizer.update": {
"total": 181.47976092086174,
"count": 360,
"self": 181.47976092086174
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.579816833138466e-07,
"count": 1,
"self": 9.579816833138466e-07
},
"TrainerController._save_models": {
"total": 0.6607492910698056,
"count": 1,
"self": 0.0020934580825269222,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6586558329872787,
"count": 1,
"self": 0.6586558329872787
}
}
}
}
}
}
}