poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6823278665542603,
"min": 1.4995876550674438,
"max": 1.7959285974502563,
"count": 1501
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 32516.033203125,
"min": 479.8680419921875,
"max": 43333.859375,
"count": 1501
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 74.25757575757575,
"min": 8.0,
"max": 136.64864864864865,
"count": 1501
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19604.0,
"min": 32.0,
"max": 21404.0,
"count": 1501
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1147.8665953764862,
"min": 1084.5960075216822,
"max": 1234.5226613098396,
"count": 1501
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 151518.39058969618,
"min": 2400.0,
"max": 200036.12895520366,
"count": 1501
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1501
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1501
},
"SoccerTwos.Step.mean": {
"value": 49999932.0,
"min": 35009978.0,
"max": 49999932.0,
"count": 1500
},
"SoccerTwos.Step.sum": {
"value": 49999932.0,
"min": 35009978.0,
"max": 49999932.0,
"count": 1500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.024236446246504784,
"min": -0.16502393782138824,
"max": 0.0948086678981781,
"count": 1500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.223447322845459,
"min": -17.155866622924805,
"max": 12.230318069458008,
"count": 1500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.022488346323370934,
"min": -0.16745972633361816,
"max": 0.09007298201322556,
"count": 1500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.990950107574463,
"min": -17.750730514526367,
"max": 11.619414329528809,
"count": 1500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.04841503792239311,
"min": -0.4856056604745253,
"max": 0.39328160190582273,
"count": 1500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -6.439200043678284,
"min": -58.428800106048584,
"max": 49.160200238227844,
"count": 1500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.04841503792239311,
"min": -0.4856056604745253,
"max": 0.39328160190582273,
"count": 1500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -6.439200043678284,
"min": -58.428800106048584,
"max": 49.160200238227844,
"count": 1500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.04268376357657345,
"min": 0.03569160518644933,
"max": 0.04893063144398886,
"count": 726
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.04268376357657345,
"min": 0.03569160518644933,
"max": 0.04893063144398886,
"count": 726
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.06262996106555588,
"min": 0.0473321278889974,
"max": 0.07791101605745784,
"count": 726
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.06262996106555588,
"min": 0.0473321278889974,
"max": 0.07791101605745784,
"count": 726
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.06422486947032444,
"min": 0.04848482825776987,
"max": 0.08058265910336845,
"count": 726
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.06422486947032444,
"min": 0.04848482825776987,
"max": 0.08058265910336845,
"count": 726
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.00030000000000000003,
"min": 0.00030000000000000003,
"max": 0.0003000000000000001,
"count": 726
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.00030000000000000003,
"min": 0.00030000000000000003,
"max": 0.0003000000000000001,
"count": 726
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.19999999999999998,
"min": 0.19999999999999993,
"max": 0.19999999999999998,
"count": 726
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.19999999999999998,
"min": 0.19999999999999993,
"max": 0.19999999999999998,
"count": 726
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 726
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 726
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690418677",
"python_version": "3.9.17 (main, Jul 5 2023, 20:41:20) \n[GCC 11.2.0]",
"command_line_arguments": "/home/alexis/anaconda3/envs/versus/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos0 --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690470205"
},
"total": 51527.679611329004,
"count": 1,
"self": 0.6461794790084241,
"children": {
"run_training.setup": {
"total": 0.023073071999988315,
"count": 1,
"self": 0.023073071999988315
},
"TrainerController.start_learning": {
"total": 51527.010358777996,
"count": 1,
"self": 37.231464410651824,
"children": {
"TrainerController._reset_env": {
"total": 11.872461443997508,
"count": 76,
"self": 11.872461443997508
},
"TrainerController.advance": {
"total": 51477.28743136634,
"count": 1011465,
"self": 35.89632126504148,
"children": {
"env_step": {
"total": 38753.07606577534,
"count": 1011465,
"self": 30744.57442039963,
"children": {
"SubprocessEnvManager._take_step": {
"total": 7984.230752578262,
"count": 1011465,
"self": 220.57855452041804,
"children": {
"TorchPolicy.evaluate": {
"total": 7763.652198057844,
"count": 1885450,
"self": 7763.652198057844
}
}
},
"workers": {
"total": 24.270892797450415,
"count": 1011465,
"self": 0.0,
"children": {
"worker_root": {
"total": 51452.27876813507,
"count": 1011465,
"is_parallel": true,
"self": 25991.474076333314,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005347751000272183,
"count": 2,
"is_parallel": true,
"self": 0.0018378200004462997,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003509930999825883,
"count": 8,
"is_parallel": true,
"self": 0.003509930999825883
}
}
},
"UnityEnvironment.step": {
"total": 0.04815375799989852,
"count": 1,
"is_parallel": true,
"self": 0.0016551839999010554,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0019433340000887256,
"count": 1,
"is_parallel": true,
"self": 0.0019433340000887256
},
"communicator.exchange": {
"total": 0.04011248400001932,
"count": 1,
"is_parallel": true,
"self": 0.04011248400001932
},
"steps_from_proto": {
"total": 0.004442755999889414,
"count": 2,
"is_parallel": true,
"self": 0.0008932469997944281,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003549509000094986,
"count": 8,
"is_parallel": true,
"self": 0.003549509000094986
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.30144138906121043,
"count": 150,
"is_parallel": true,
"self": 0.059094461090353434,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.242346927970857,
"count": 600,
"is_parallel": true,
"self": 0.242346927970857
}
}
},
"UnityEnvironment.step": {
"total": 25460.503250412694,
"count": 1011464,
"is_parallel": true,
"self": 1223.8319172887786,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1342.7371242024376,
"count": 1011464,
"is_parallel": true,
"self": 1342.7371242024376
},
"communicator.exchange": {
"total": 19162.25508771095,
"count": 1011464,
"is_parallel": true,
"self": 19162.25508771095
},
"steps_from_proto": {
"total": 3731.6791212105254,
"count": 2022928,
"is_parallel": true,
"self": 748.2843993071174,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2983.394721903408,
"count": 8091712,
"is_parallel": true,
"self": 2983.394721903408
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 12688.315044325958,
"count": 1011465,
"self": 239.42839476415065,
"children": {
"process_trajectory": {
"total": 4136.992882396729,
"count": 1011465,
"self": 4116.678543270726,
"children": {
"RLTrainer._checkpoint": {
"total": 20.31433912600346,
"count": 31,
"self": 20.31433912600346
}
}
},
"_update_policy": {
"total": 8311.893767165078,
"count": 726,
"self": 4894.886106728573,
"children": {
"TorchPOCAOptimizer.update": {
"total": 3417.007660436505,
"count": 41496,
"self": 3417.007660436505
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.7400016076862812e-06,
"count": 1,
"self": 1.7400016076862812e-06
},
"TrainerController._save_models": {
"total": 0.6189998170011677,
"count": 1,
"self": 0.006280888002947904,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6127189289982198,
"count": 1,
"self": 0.6127189289982198
}
}
}
}
}
}
}
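
A minimal sketch (not part of the original log) of one way to inspect a file like this locally. The filename "timers.json" and the printed output format are assumptions; only the keys used ("gauges", "value", "min", "max", "count", "total", "children") come from the structure shown above.

import json

with open("timers.json") as f:
    timers = json.load(f)

# Print each gauge with its latest/min/max values and sample count,
# e.g. SoccerTwos.Self-play.ELO.mean.
for name, gauge in timers["gauges"].items():
    print(f'{name}: value={gauge["value"]:.4f} '
          f'min={gauge["min"]:.4f} max={gauge["max"]:.4f} count={gauge["count"]}')

def walk(node, name="root", depth=0):
    """Recursively print the timer tree (total seconds and call counts)."""
    print(f'{"  " * depth}{name}: total={node["total"]:.1f}s count={node["count"]}')
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)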