{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5180662870407104,
"min": 1.4537625312805176,
"max": 3.295696258544922,
"count": 1751
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 29001.138671875,
"min": 19740.61328125,
"max": 105462.28125,
"count": 1751
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 57.51162790697674,
"min": 38.330708661417326,
"max": 999.0,
"count": 1751
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19784.0,
"min": 8732.0,
"max": 27588.0,
"count": 1751
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1514.9747793086233,
"min": 1188.4667000396976,
"max": 1575.5373644724887,
"count": 1748
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 260575.6620410832,
"min": 2388.11500602936,
"max": 383159.1370412271,
"count": 1748
},
"SoccerTwos.Step.mean": {
"value": 17509985.0,
"min": 9450.0,
"max": 17509985.0,
"count": 1751
},
"SoccerTwos.Step.sum": {
"value": 17509985.0,
"min": 9450.0,
"max": 17509985.0,
"count": 1751
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.020330609753727913,
"min": -0.14643694460391998,
"max": 0.17252300679683685,
"count": 1751
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.4968650341033936,
"min": -25.919340133666992,
"max": 24.193138122558594,
"count": 1751
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.019250068813562393,
"min": -0.14864228665828705,
"max": 0.17098000645637512,
"count": 1751
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -3.311011791229248,
"min": -26.30968475341797,
"max": 24.28445816040039,
"count": 1751
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1751
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1751
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.16332209283529325,
"min": -0.6671789461060574,
"max": 0.5629405435678121,
"count": 1751
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 28.09139996767044,
"min": -60.07320040464401,
"max": 52.2077996134758,
"count": 1751
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.16332209283529325,
"min": -0.6671789461060574,
"max": 0.5629405435678121,
"count": 1751
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 28.09139996767044,
"min": -60.07320040464401,
"max": 52.2077996134758,
"count": 1751
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1751
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1751
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017533206019046097,
"min": 0.009657192969461903,
"max": 0.025968049650934213,
"count": 847
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017533206019046097,
"min": 0.009657192969461903,
"max": 0.025968049650934213,
"count": 847
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09411052390933036,
"min": 0.00092769174661953,
"max": 0.12981676012277604,
"count": 847
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09411052390933036,
"min": 0.00092769174661953,
"max": 0.12981676012277604,
"count": 847
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09523837566375733,
"min": 0.0009241009009807992,
"max": 0.13323278203606606,
"count": 847
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09523837566375733,
"min": 0.0009241009009807992,
"max": 0.13323278203606606,
"count": 847
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 847
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 847
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 847
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 847
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 847
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 847
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685610016",
"python_version": "3.9.16 (main, Mar 8 2023, 14:00:05) \n[GCC 11.2.0]",
"command_line_arguments": "/home/ai/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu113",
"numpy_version": "1.21.2",
"end_time_seconds": "1685626797"
},
"total": 16781.088473484007,
"count": 1,
"self": 0.0027533890097402036,
"children": {
"run_training.setup": {
"total": 0.005900299991481006,
"count": 1,
"self": 0.005900299991481006
},
"TrainerController.start_learning": {
"total": 16781.079819795006,
"count": 1,
"self": 16.76027166834683,
"children": {
"TrainerController._reset_env": {
"total": 6.7389328529679915,
"count": 88,
"self": 6.7389328529679915
},
"TrainerController.advance": {
"total": 16757.257331969697,
"count": 1216495,
"self": 16.612954382479074,
"children": {
"env_step": {
"total": 12173.069506224012,
"count": 1216495,
"self": 8998.111707923963,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3165.3512335359264,
"count": 1216495,
"self": 91.6559871135396,
"children": {
"TorchPolicy.evaluate": {
"total": 3073.695246422387,
"count": 2201686,
"self": 3073.695246422387
}
}
},
"workers": {
"total": 9.606564764122595,
"count": 1216494,
"self": 0.0,
"children": {
"worker_root": {
"total": 16759.442294077628,
"count": 1216494,
"is_parallel": true,
"self": 9441.820313411052,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0012385250010993332,
"count": 2,
"is_parallel": true,
"self": 0.0003075519925914705,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009309730085078627,
"count": 8,
"is_parallel": true,
"self": 0.0009309730085078627
}
}
},
"UnityEnvironment.step": {
"total": 0.013737829998717643,
"count": 1,
"is_parallel": true,
"self": 0.0003534809948178008,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002459980023559183,
"count": 1,
"is_parallel": true,
"self": 0.0002459980023559183
},
"communicator.exchange": {
"total": 0.01212592699448578,
"count": 1,
"is_parallel": true,
"self": 0.01212592699448578
},
"steps_from_proto": {
"total": 0.0010124240070581436,
"count": 2,
"is_parallel": true,
"self": 0.00021697398915421218,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007954500179039314,
"count": 8,
"is_parallel": true,
"self": 0.0007954500179039314
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 7317.5358168534,
"count": 1216493,
"is_parallel": true,
"self": 423.9135033531056,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 272.44869865241344,
"count": 1216493,
"is_parallel": true,
"self": 272.44869865241344
},
"communicator.exchange": {
"total": 5442.560499921397,
"count": 1216493,
"is_parallel": true,
"self": 5442.560499921397
},
"steps_from_proto": {
"total": 1178.6131149264838,
"count": 2432986,
"is_parallel": true,
"self": 246.49301429046318,
"children": {
"_process_rank_one_or_two_observation": {
"total": 932.1201006360207,
"count": 9731944,
"is_parallel": true,
"self": 932.1201006360207
}
}
}
}
},
"steps_from_proto": {
"total": 0.08616381317551713,
"count": 174,
"is_parallel": true,
"self": 0.01794812228763476,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.06821569088788237,
"count": 696,
"is_parallel": true,
"self": 0.06821569088788237
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4567.574871363206,
"count": 1216494,
"self": 99.60447046456102,
"children": {
"process_trajectory": {
"total": 1860.6178297195584,
"count": 1216494,
"self": 1852.5881380495994,
"children": {
"RLTrainer._checkpoint": {
"total": 8.029691669958993,
"count": 35,
"self": 8.029691669958993
}
}
},
"_update_policy": {
"total": 2607.352571179086,
"count": 848,
"self": 1496.101430622337,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1111.2511405567493,
"count": 25446,
"self": 1111.2511405567493
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.549977792426944e-07,
"count": 1,
"self": 8.549977792426944e-07
},
"TrainerController._save_models": {
"total": 0.32328244899690617,
"count": 1,
"self": 0.0010382890031905845,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3222441599937156,
"count": 1,
"self": 0.3222441599937156
}
}
}
}
}
}
}