{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.9214180707931519,
"min": 1.862352967262268,
"max": 2.0375585556030273,
"count": 171
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 39473.61328125,
"min": 14372.5556640625,
"max": 44608.6640625,
"count": 171
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 49.58585858585859,
"min": 40.706896551724135,
"max": 71.81428571428572,
"count": 171
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19636.0,
"min": 6180.0,
"max": 20260.0,
"count": 171
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1585.7677991109404,
"min": 1553.5179580138852,
"max": 1610.7972019352037,
"count": 171
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 313982.0242239662,
"min": 94213.86956426132,
"max": 394536.63157583587,
"count": 171
},
"SoccerTwos.Step.mean": {
"value": 5139959.0,
"min": 3439951.0,
"max": 5139959.0,
"count": 171
},
"SoccerTwos.Step.sum": {
"value": 5139959.0,
"min": 3439951.0,
"max": 5139959.0,
"count": 171
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.060572218149900436,
"min": -0.08898256719112396,
"max": 0.1005156859755516,
"count": 171
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -12.053871154785156,
"min": -17.240734100341797,
"max": 17.89179229736328,
"count": 171
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.05699760094285011,
"min": -0.087178535759449,
"max": 0.09999766200780869,
"count": 171
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -11.342522621154785,
"min": -16.390785217285156,
"max": 17.799583435058594,
"count": 171
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 171
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 171
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.1673165817356589,
"min": -0.2861095484177671,
"max": 0.22272921378692884,
"count": 171
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -33.29599976539612,
"min": -56.93580013513565,
"max": 39.645800054073334,
"count": 171
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.1673165817356589,
"min": -0.2861095484177671,
"max": 0.22272921378692884,
"count": 171
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -33.29599976539612,
"min": -56.93580013513565,
"max": 39.645800054073334,
"count": 171
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 171
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 171
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018759650674959025,
"min": 0.01115469560221148,
"max": 0.021448074255992346,
"count": 82
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018759650674959025,
"min": 0.01115469560221148,
"max": 0.021448074255992346,
"count": 82
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11596904620528221,
"min": 0.09423803612589836,
"max": 0.12632074877619742,
"count": 82
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11596904620528221,
"min": 0.09423803612589836,
"max": 0.12632074877619742,
"count": 82
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11812345335880915,
"min": 0.0952321524421374,
"max": 0.12842872540156047,
"count": 82
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11812345335880915,
"min": 0.0952321524421374,
"max": 0.12842872540156047,
"count": 82
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 82
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 82
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 82
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 82
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 82
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 82
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716454994",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\ASUSROG\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1716467265"
},
"total": 12270.348834600001,
"count": 1,
"self": 1.1021543000024394,
"children": {
"run_training.setup": {
"total": 0.3115802999996049,
"count": 1,
"self": 0.3115802999996049
},
"TrainerController.start_learning": {
"total": 12268.935099999999,
"count": 1,
"self": 8.741336000073716,
"children": {
"TrainerController._reset_env": {
"total": 24.339133400000264,
"count": 10,
"self": 24.339133400000264
},
"TrainerController.advance": {
"total": 12235.582564599925,
"count": 119838,
"self": 8.518548199779616,
"children": {
"env_step": {
"total": 6489.675793600161,
"count": 119838,
"self": 5023.78484800052,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1460.3441352998884,
"count": 119838,
"self": 46.86335090045304,
"children": {
"TorchPolicy.evaluate": {
"total": 1413.4807843994354,
"count": 214308,
"self": 1413.4807843994354
}
}
},
"workers": {
"total": 5.546810299753361,
"count": 119838,
"self": 0.0,
"children": {
"worker_root": {
"total": 12204.119910299984,
"count": 119838,
"is_parallel": true,
"self": 8252.098853600017,
"children": {
"steps_from_proto": {
"total": 0.0566629999966608,
"count": 20,
"is_parallel": true,
"self": 0.011537199997746939,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04512579999891386,
"count": 80,
"is_parallel": true,
"self": 0.04512579999891386
}
}
},
"UnityEnvironment.step": {
"total": 3951.9643936999705,
"count": 119838,
"is_parallel": true,
"self": 227.40118399990342,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 221.29594099971882,
"count": 119838,
"is_parallel": true,
"self": 221.29594099971882
},
"communicator.exchange": {
"total": 2718.435361500345,
"count": 119838,
"is_parallel": true,
"self": 2718.435361500345
},
"steps_from_proto": {
"total": 784.8319072000031,
"count": 239676,
"is_parallel": true,
"self": 154.5026458000434,
"children": {
"_process_rank_one_or_two_observation": {
"total": 630.3292613999597,
"count": 958704,
"is_parallel": true,
"self": 630.3292613999597
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5737.388222799984,
"count": 119838,
"self": 47.69191030006823,
"children": {
"process_trajectory": {
"total": 1710.6069001999222,
"count": 119838,
"self": 1709.2757095999232,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3311905999989904,
"count": 4,
"self": 1.3311905999989904
}
}
},
"_update_policy": {
"total": 3979.0894122999935,
"count": 83,
"self": 503.9244860999961,
"children": {
"TorchPOCAOptimizer.update": {
"total": 3475.1649261999974,
"count": 2480,
"self": 3475.1649261999974
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.599999788799323e-06,
"count": 1,
"self": 2.599999788799323e-06
},
"TrainerController._save_models": {
"total": 0.2720633999997517,
"count": 1,
"self": 0.014902599999913946,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25716079999983776,
"count": 1,
"self": 0.25716079999983776
}
}
}
}
}
}
}