{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8997670412063599,
"min": 1.840373158454895,
"max": 3.2957375049591064,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 38056.1328125,
"min": 10524.599609375,
"max": 105463.6015625,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 52.32631578947368,
"min": 36.097744360902254,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19884.0,
"min": 3996.0,
"max": 28936.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1579.9891392023628,
"min": 1199.95454123925,
"max": 1580.9171718560774,
"count": 496
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 300197.9364484489,
"min": 2401.4076928811,
"max": 407925.43234129134,
"count": 496
},
"SoccerTwos.Step.mean": {
"value": 4999984.0,
"min": 9484.0,
"max": 4999984.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999984.0,
"min": 9484.0,
"max": 4999984.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.000868217321112752,
"min": -0.11656514555215836,
"max": 0.18462947010993958,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.16409307718276978,
"min": -24.012420654296875,
"max": 28.973129272460938,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.00047967469436116517,
"min": -0.11385878920555115,
"max": 0.19237306714057922,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.0906585156917572,
"min": -23.454910278320312,
"max": 30.587318420410156,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.030019048029783543,
"min": -0.5433857142925262,
"max": 0.46706665886773,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -5.673600077629089,
"min": -55.6139999628067,
"max": 65.4522003531456,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.030019048029783543,
"min": -0.5433857142925262,
"max": 0.46706665886773,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -5.673600077629089,
"min": -55.6139999628067,
"max": 65.4522003531456,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014888953096427333,
"min": 0.010700619835794593,
"max": 0.022866204695310442,
"count": 241
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014888953096427333,
"min": 0.010700619835794593,
"max": 0.022866204695310442,
"count": 241
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11210248867670695,
"min": 0.0009040514298249037,
"max": 0.12949289282162985,
"count": 241
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11210248867670695,
"min": 0.0009040514298249037,
"max": 0.12949289282162985,
"count": 241
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11408566360672315,
"min": 0.0009649987869824355,
"max": 0.13167524288098018,
"count": 241
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11408566360672315,
"min": 0.0009649987869824355,
"max": 0.13167524288098018,
"count": 241
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 241
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 241
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 241
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 241
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 241
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 241
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687784021",
"python_version": "3.9.16 (main, May 17 2023, 17:49:16) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\HP\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1687814403"
},
"total": 30379.0671534,
"count": 1,
"self": 3.225056199997198,
"children": {
"run_training.setup": {
"total": 0.4295460000000002,
"count": 1,
"self": 0.4295460000000002
},
"TrainerController.start_learning": {
"total": 30375.412551200003,
"count": 1,
"self": 13.868539799113933,
"children": {
"TrainerController._reset_env": {
"total": 83.5566681999994,
"count": 25,
"self": 83.5566681999994
},
"TrainerController.advance": {
"total": 30277.367302100887,
"count": 346113,
"self": 15.163051399264077,
"children": {
"env_step": {
"total": 10664.192402000728,
"count": 346113,
"self": 8449.588225302014,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2206.184901599864,
"count": 346113,
"self": 84.61793239926374,
"children": {
"TorchPolicy.evaluate": {
"total": 2121.5669692006004,
"count": 630058,
"self": 2121.5669692006004
}
}
},
"workers": {
"total": 8.419275098851003,
"count": 346113,
"self": 0.0,
"children": {
"worker_root": {
"total": 30271.26001469957,
"count": 346113,
"is_parallel": true,
"self": 23406.947119200326,
"children": {
"steps_from_proto": {
"total": 0.08018039999564053,
"count": 50,
"is_parallel": true,
"self": 0.01708030000916949,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.06310009998647104,
"count": 200,
"is_parallel": true,
"self": 0.06310009998647104
}
}
},
"UnityEnvironment.step": {
"total": 6864.232715099248,
"count": 346113,
"is_parallel": true,
"self": 377.00263039839865,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 362.9385453004808,
"count": 346113,
"is_parallel": true,
"self": 362.9385453004808
},
"communicator.exchange": {
"total": 4899.581217100371,
"count": 346113,
"is_parallel": true,
"self": 4899.581217100371
},
"steps_from_proto": {
"total": 1224.7103222999979,
"count": 692226,
"is_parallel": true,
"self": 249.2090652003617,
"children": {
"_process_rank_one_or_two_observation": {
"total": 975.5012570996362,
"count": 2768904,
"is_parallel": true,
"self": 975.5012570996362
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 19598.011848700895,
"count": 346113,
"self": 89.78092410198224,
"children": {
"process_trajectory": {
"total": 3282.7165348989065,
"count": 346113,
"self": 3279.5535615989047,
"children": {
"RLTrainer._checkpoint": {
"total": 3.162973300001795,
"count": 10,
"self": 3.162973300001795
}
}
},
"_update_policy": {
"total": 16225.514389700005,
"count": 241,
"self": 1395.569181400042,
"children": {
"TorchPOCAOptimizer.update": {
"total": 14829.945208299963,
"count": 7239,
"self": 14829.945208299963
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2000018614344299e-06,
"count": 1,
"self": 1.2000018614344299e-06
},
"TrainerController._save_models": {
"total": 0.6200399000008474,
"count": 1,
"self": 0.3289555000010296,
"children": {
"RLTrainer._checkpoint": {
"total": 0.29108439999981783,
"count": 1,
"self": 0.29108439999981783
}
}
}
}
}
}
}