{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7339730262756348,
"min": 1.7291780710220337,
"max": 3.2957401275634766,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34124.58984375,
"min": 14281.009765625,
"max": 114414.5546875,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 53.42391304347826,
"min": 47.17821782178218,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19660.0,
"min": 16184.0,
"max": 23480.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1658.941110642111,
"min": 1196.117773356657,
"max": 1661.9492174145487,
"count": 492
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 305245.1643581484,
"min": 2396.1925757010513,
"max": 330914.0149784479,
"count": 492
},
"SoccerTwos.Step.mean": {
"value": 4999984.0,
"min": 9230.0,
"max": 4999984.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999984.0,
"min": 9230.0,
"max": 4999984.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.000171336141647771,
"min": -0.08570684492588043,
"max": 0.26223286986351013,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.03152585029602051,
"min": -12.341785430908203,
"max": 35.9259033203125,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0022114384919404984,
"min": -0.09080786257982254,
"max": 0.26804614067077637,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.4069046974182129,
"min": -13.076332092285156,
"max": 36.722320556640625,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.006792392419732135,
"min": -0.42857142857142855,
"max": 0.5431090904907747,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 1.249800205230713,
"min": -44.853200137615204,
"max": 67.95720052719116,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.006792392419732135,
"min": -0.42857142857142855,
"max": 0.5431090904907747,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 1.249800205230713,
"min": -44.853200137615204,
"max": 67.95720052719116,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.013396647967965691,
"min": 0.010819613545572792,
"max": 0.016110131212008126,
"count": 121
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.013396647967965691,
"min": 0.010819613545572792,
"max": 0.016110131212008126,
"count": 121
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08797085620462894,
"min": 0.00015746458529974916,
"max": 0.08797085620462894,
"count": 121
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08797085620462894,
"min": 0.00015746458529974916,
"max": 0.08797085620462894,
"count": 121
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09591558575630188,
"min": 0.00016905759239307371,
"max": 0.09591558575630188,
"count": 121
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09591558575630188,
"min": 0.00016905759239307371,
"max": 0.09591558575630188,
"count": 121
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 121
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 121
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.19999999999999998,
"min": 0.19999999999999998,
"max": 0.19999999999999998,
"count": 121
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.19999999999999998,
"min": 0.19999999999999998,
"max": 0.19999999999999998,
"count": 121
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.004999999999999999,
"min": 0.004999999999999999,
"max": 0.004999999999999999,
"count": 121
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.004999999999999999,
"min": 0.004999999999999999,
"max": 0.004999999999999999,
"count": 121
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1731964305",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\imwat\\Assortment\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=v2-SoccerTwos-bSize --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1731989593"
},
"total": 25288.047431,
"count": 1,
"self": 0.4445756999994046,
"children": {
"run_training.setup": {
"total": 0.12393019999990429,
"count": 1,
"self": 0.12393019999990429
},
"TrainerController.start_learning": {
"total": 25287.4789251,
"count": 1,
"self": 9.427928100292775,
"children": {
"TrainerController._reset_env": {
"total": 10.663112599990654,
"count": 25,
"self": 10.663112599990654
},
"TrainerController.advance": {
"total": 25267.248125199712,
"count": 339100,
"self": 8.507842699134926,
"children": {
"env_step": {
"total": 6021.297513300469,
"count": 339100,
"self": 4662.6947642993955,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1352.7708640009082,
"count": 339100,
"self": 50.66049249997741,
"children": {
"TorchPolicy.evaluate": {
"total": 1302.1103715009308,
"count": 630082,
"self": 1302.1103715009308
}
}
},
"workers": {
"total": 5.831885000165585,
"count": 339100,
"self": 0.0,
"children": {
"worker_root": {
"total": 25268.102838099177,
"count": 339100,
"is_parallel": true,
"self": 21711.939481399357,
"children": {
"steps_from_proto": {
"total": 0.05703280000216182,
"count": 50,
"is_parallel": true,
"self": 0.010950999966553354,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04608180003560847,
"count": 200,
"is_parallel": true,
"self": 0.04608180003560847
}
}
},
"UnityEnvironment.step": {
"total": 3556.1063238998167,
"count": 339100,
"is_parallel": true,
"self": 218.3436057012127,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 199.0866322996062,
"count": 339100,
"is_parallel": true,
"self": 199.0866322996062
},
"communicator.exchange": {
"total": 2432.4369884998596,
"count": 339100,
"is_parallel": true,
"self": 2432.4369884998596
},
"steps_from_proto": {
"total": 706.2390973991382,
"count": 678200,
"is_parallel": true,
"self": 141.13796319828361,
"children": {
"_process_rank_one_or_two_observation": {
"total": 565.1011342008546,
"count": 2712800,
"is_parallel": true,
"self": 565.1011342008546
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 19237.442769200108,
"count": 339100,
"self": 65.31569800198122,
"children": {
"process_trajectory": {
"total": 1442.9217117981516,
"count": 339100,
"self": 1441.25050559815,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6712062000015067,
"count": 10,
"self": 1.6712062000015067
}
}
},
"_update_policy": {
"total": 17729.205359399974,
"count": 121,
"self": 2229.250943100018,
"children": {
"TorchPOCAOptimizer.update": {
"total": 15499.954416299955,
"count": 9680,
"self": 15499.954416299955
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5000005078036338e-06,
"count": 1,
"self": 1.5000005078036338e-06
},
"TrainerController._save_models": {
"total": 0.1397577000025194,
"count": 1,
"self": 0.002017599999817321,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1377401000027021,
"count": 1,
"self": 0.1377401000027021
}
}
}
}
}
}
}