{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2190823554992676,
"min": 3.169938087463379,
"max": 3.295727491378784,
"count": 69
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 41204.25390625,
"min": 25622.544921875,
"max": 112596.203125,
"count": 69
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 780.0,
"min": 351.3076923076923,
"max": 999.0,
"count": 69
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 18720.0,
"min": 15544.0,
"max": 24884.0,
"count": 69
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1208.1742974434487,
"min": 1197.0547973914063,
"max": 1208.9254448382285,
"count": 62
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4832.697189773795,
"min": 2396.006796916041,
"max": 28962.52183966544,
"count": 62
},
"SoccerTwos.Step.mean": {
"value": 689162.0,
"min": 9458.0,
"max": 689162.0,
"count": 69
},
"SoccerTwos.Step.sum": {
"value": 689162.0,
"min": 9458.0,
"max": 689162.0,
"count": 69
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.021979693323373795,
"min": -0.037546150386333466,
"max": -0.003816890297457576,
"count": 69
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.2857360243797302,
"min": -0.9154719114303589,
"max": -0.053396981209516525,
"count": 69
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.023636940866708755,
"min": -0.03766478970646858,
"max": -0.00437122629955411,
"count": 69
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.3072802424430847,
"min": -0.9328320026397705,
"max": -0.06119716912508011,
"count": 69
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 69
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 69
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.011692306170096764,
"min": -0.47635999917984007,
"max": 0.439450000723203,
"count": 69
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.15199998021125793,
"min": -9.527199983596802,
"max": 10.546800017356873,
"count": 69
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.011692306170096764,
"min": -0.47635999917984007,
"max": 0.439450000723203,
"count": 69
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.15199998021125793,
"min": -9.527199983596802,
"max": 10.546800017356873,
"count": 69
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 69
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 69
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018697472709269882,
"min": 0.011650182931528736,
"max": 0.021675959120815,
"count": 32
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018697472709269882,
"min": 0.011650182931528736,
"max": 0.021675959120815,
"count": 32
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0015419688948895781,
"min": 0.0008111949631711468,
"max": 0.009479031975691518,
"count": 32
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0015419688948895781,
"min": 0.0008111949631711468,
"max": 0.009479031975691518,
"count": 32
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0015532162603146087,
"min": 0.0008147206880191031,
"max": 0.009509439890583357,
"count": 32
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0015532162603146087,
"min": 0.0008147206880191031,
"max": 0.009509439890583357,
"count": 32
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 32
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 32
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 32
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 32
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 32
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 32
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678054205",
"python_version": "3.9.16 (main, Mar 1 2023, 18:30:21) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pa-legg\\.conda\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.0+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1678056972"
},
"total": 2766.416945,
"count": 1,
"self": 4.136742300000151,
"children": {
"run_training.setup": {
"total": 0.2041550000000001,
"count": 1,
"self": 0.2041550000000001
},
"TrainerController.start_learning": {
"total": 2762.0760477,
"count": 1,
"self": 1.5449750000580025,
"children": {
"TrainerController._reset_env": {
"total": 5.515579199999982,
"count": 4,
"self": 5.515579199999982
},
"TrainerController.advance": {
"total": 2754.8297149999416,
"count": 45231,
"self": 1.6198237999087723,
"children": {
"env_step": {
"total": 1216.7593355000176,
"count": 45231,
"self": 933.3208997000022,
"children": {
"SubprocessEnvManager._take_step": {
"total": 282.40685010000936,
"count": 45231,
"self": 10.057651400004033,
"children": {
"TorchPolicy.evaluate": {
"total": 272.34919870000533,
"count": 89728,
"self": 272.34919870000533
}
}
},
"workers": {
"total": 1.0315857000060191,
"count": 45230,
"self": 0.0,
"children": {
"worker_root": {
"total": 2754.643842200031,
"count": 45230,
"is_parallel": true,
"self": 2017.8963663000438,
"children": {
"steps_from_proto": {
"total": 0.01583869999992693,
"count": 8,
"is_parallel": true,
"self": 0.0029214000001260843,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.012917299999800846,
"count": 32,
"is_parallel": true,
"self": 0.012917299999800846
}
}
},
"UnityEnvironment.step": {
"total": 736.7316371999873,
"count": 45230,
"is_parallel": true,
"self": 44.47383070001274,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 33.81765680000756,
"count": 45230,
"is_parallel": true,
"self": 33.81765680000756
},
"communicator.exchange": {
"total": 511.00838370000633,
"count": 45230,
"is_parallel": true,
"self": 511.00838370000633
},
"steps_from_proto": {
"total": 147.43176599996073,
"count": 90460,
"is_parallel": true,
"self": 27.583970300014556,
"children": {
"_process_rank_one_or_two_observation": {
"total": 119.84779569994618,
"count": 361840,
"is_parallel": true,
"self": 119.84779569994618
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1536.4505557000155,
"count": 45230,
"self": 10.892842000058408,
"children": {
"process_trajectory": {
"total": 225.61968649995566,
"count": 45230,
"self": 225.3206722999558,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2990141999998741,
"count": 1,
"self": 0.2990141999998741
}
}
},
"_update_policy": {
"total": 1299.9380272000014,
"count": 32,
"self": 143.07737190000603,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1156.8606552999954,
"count": 963,
"self": 1156.8606552999954
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.7000002117129043e-06,
"count": 1,
"self": 1.7000002117129043e-06
},
"TrainerController._save_models": {
"total": 0.1857767999999851,
"count": 1,
"self": 0.005912599999646773,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17986420000033831,
"count": 1,
"self": 0.17986420000033831
}
}
}
}
}
}
}