{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4715203046798706,
"min": 1.450524926185608,
"max": 3.2957208156585693,
"count": 1349
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 27311.416015625,
"min": 6500.7763671875,
"max": 112942.0625,
"count": 1349
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 60.91139240506329,
"min": 41.810344827586206,
"max": 999.0,
"count": 1349
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19248.0,
"min": 3996.0,
"max": 31476.0,
"count": 1349
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1633.7630491457198,
"min": 1190.81306429258,
"max": 1674.353935592132,
"count": 1324
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 258134.56176502374,
"min": 2381.62612858516,
"max": 384412.792531291,
"count": 1324
},
"SoccerTwos.Step.mean": {
"value": 13489917.0,
"min": 9124.0,
"max": 13489917.0,
"count": 1349
},
"SoccerTwos.Step.sum": {
"value": 13489917.0,
"min": 9124.0,
"max": 13489917.0,
"count": 1349
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.09469994157552719,
"min": -0.11845909804105759,
"max": 0.16957607865333557,
"count": 1349
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -15.057291030883789,
"min": -19.2597713470459,
"max": 24.0902099609375,
"count": 1349
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.09920581430196762,
"min": -0.12321903556585312,
"max": 0.1633630245923996,
"count": 1349
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -15.773724555969238,
"min": -19.122188568115234,
"max": 24.026042938232422,
"count": 1349
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1349
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1349
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.199137105132049,
"min": -0.6167000010609627,
"max": 0.46511453084456617,
"count": 1349
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -31.66279971599579,
"min": -62.93199980258942,
"max": 58.30740034580231,
"count": 1349
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.199137105132049,
"min": -0.6167000010609627,
"max": 0.46511453084456617,
"count": 1349
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -31.66279971599579,
"min": -62.93199980258942,
"max": 58.30740034580231,
"count": 1349
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1349
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1349
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016598315542796626,
"min": 0.010099395599293832,
"max": 0.02617170734059376,
"count": 650
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016598315542796626,
"min": 0.010099395599293832,
"max": 0.02617170734059376,
"count": 650
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.1116062785188357,
"min": 2.0480440639403243e-06,
"max": 0.12350986252228419,
"count": 650
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.1116062785188357,
"min": 2.0480440639403243e-06,
"max": 0.12350986252228419,
"count": 650
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11325699786345164,
"min": 2.2552148720933475e-06,
"max": 0.1265305275718371,
"count": 650
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11325699786345164,
"min": 2.2552148720933475e-06,
"max": 0.1265305275718371,
"count": 650
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 650
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 650
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 650
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 650
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 650
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 650
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679292035",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\17723\\anconda_3\\envs\\rl3\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./ml-agents/mlagents/training-envs-executables/SoccerTwos --run-id=SoccerTwos3 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.12.1+cu113",
"numpy_version": "1.21.2",
"end_time_seconds": "1679311086"
},
"total": 19051.9107751,
"count": 1,
"self": 0.15190639999855193,
"children": {
"run_training.setup": {
"total": 0.06460090000000007,
"count": 1,
"self": 0.06460090000000007
},
"TrainerController.start_learning": {
"total": 19051.6942678,
"count": 1,
"self": 16.27303060029226,
"children": {
"TrainerController._reset_env": {
"total": 4.163365299998364,
"count": 68,
"self": 4.163365299998364
},
"TrainerController.advance": {
"total": 19031.04265229971,
"count": 926483,
"self": 16.982015299232444,
"children": {
"env_step": {
"total": 13642.363800001165,
"count": 926483,
"self": 8303.997022900698,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5328.648837799872,
"count": 926483,
"self": 101.08632479912012,
"children": {
"TorchPolicy.evaluate": {
"total": 5227.562513000752,
"count": 1700404,
"self": 5227.562513000752
}
}
},
"workers": {
"total": 9.717939300595443,
"count": 926483,
"self": 0.0,
"children": {
"worker_root": {
"total": 19025.142885899673,
"count": 926483,
"is_parallel": true,
"self": 12309.342948199523,
"children": {
"steps_from_proto": {
"total": 0.07659219999835809,
"count": 136,
"is_parallel": true,
"self": 0.015100400003018155,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.061491799995339935,
"count": 544,
"is_parallel": true,
"self": 0.061491799995339935
}
}
},
"UnityEnvironment.step": {
"total": 6715.723345500153,
"count": 926483,
"is_parallel": true,
"self": 316.8574164990423,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 210.22225039903367,
"count": 926483,
"is_parallel": true,
"self": 210.22225039903367
},
"communicator.exchange": {
"total": 5254.63778020078,
"count": 926483,
"is_parallel": true,
"self": 5254.63778020078
},
"steps_from_proto": {
"total": 934.0058984012974,
"count": 1852966,
"is_parallel": true,
"self": 189.24724460020514,
"children": {
"_process_rank_one_or_two_observation": {
"total": 744.7586538010922,
"count": 7411864,
"is_parallel": true,
"self": 744.7586538010922
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5371.696836999314,
"count": 926483,
"self": 113.92905559895189,
"children": {
"process_trajectory": {
"total": 2616.7261714003553,
"count": 926483,
"self": 2611.1648458003583,
"children": {
"RLTrainer._checkpoint": {
"total": 5.561325599996962,
"count": 26,
"self": 5.561325599996962
}
}
},
"_update_policy": {
"total": 2641.0416100000066,
"count": 651,
"self": 1350.7592552000035,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1290.282354800003,
"count": 19542,
"self": 1290.282354800003
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.999980541877449e-07,
"count": 1,
"self": 6.999980541877449e-07
},
"TrainerController._save_models": {
"total": 0.21521890000076382,
"count": 1,
"self": 0.0025214999986928888,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21269740000207094,
"count": 1,
"self": 0.21269740000207094
}
}
}
}
}
}
}