{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.059171199798584,
"min": 3.059171199798584,
"max": 3.2957303524017334,
"count": 117
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 46890.9765625,
"min": 8924.302734375,
"max": 126969.21875,
"count": 117
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 539.8888888888889,
"min": 402.6363636363636,
"max": 999.0,
"count": 117
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19436.0,
"min": 7992.0,
"max": 28740.0,
"count": 117
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1206.1085289211621,
"min": 1198.680373626865,
"max": 1215.4183054330401,
"count": 104
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 19297.736462738594,
"min": 2402.6316396820293,
"max": 19401.48093718411,
"count": 104
},
"SoccerTwos.Step.mean": {
"value": 1169914.0,
"min": 9386.0,
"max": 1169914.0,
"count": 117
},
"SoccerTwos.Step.sum": {
"value": 1169914.0,
"min": 9386.0,
"max": 1169914.0,
"count": 117
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.005314829759299755,
"min": -0.06455523520708084,
"max": 0.00918043963611126,
"count": 117
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.09566693753004074,
"min": -1.0327775478363037,
"max": 0.14688703417778015,
"count": 117
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.006291152909398079,
"min": -0.06459656357765198,
"max": 0.008282179944217205,
"count": 117
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.11324075609445572,
"min": -1.0333099365234375,
"max": 0.13102498650550842,
"count": 117
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 117
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 117
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.23493333160877228,
"min": -0.610920000076294,
"max": 0.3526499941945076,
"count": 117
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -4.228799968957901,
"min": -12.218400001525879,
"max": 5.642399907112122,
"count": 117
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.23493333160877228,
"min": -0.610920000076294,
"max": 0.3526499941945076,
"count": 117
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -4.228799968957901,
"min": -12.218400001525879,
"max": 5.642399907112122,
"count": 117
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 117
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 117
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017703200220906486,
"min": 0.013049109647302734,
"max": 0.020906921201579585,
"count": 54
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017703200220906486,
"min": 0.013049109647302734,
"max": 0.020906921201579585,
"count": 54
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.010448992547268669,
"min": 0.00020793138343530397,
"max": 0.010448992547268669,
"count": 54
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.010448992547268669,
"min": 0.00020793138343530397,
"max": 0.010448992547268669,
"count": 54
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.010450880943487088,
"min": 0.00021911569056101143,
"max": 0.010450880943487088,
"count": 54
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.010450880943487088,
"min": 0.00021911569056101143,
"max": 0.010450880943487088,
"count": 54
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 54
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 54
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 54
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 54
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 54
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 54
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713971955",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713974484"
},
"total": 2529.0015541919997,
"count": 1,
"self": 0.10274598899923149,
"children": {
"run_training.setup": {
"total": 0.057570802000043386,
"count": 1,
"self": 0.057570802000043386
},
"TrainerController.start_learning": {
"total": 2528.8412374010004,
"count": 1,
"self": 1.7045655339825316,
"children": {
"TrainerController._reset_env": {
"total": 3.587944897999762,
"count": 6,
"self": 3.587944897999762
},
"TrainerController.advance": {
"total": 2523.204093412018,
"count": 76412,
"self": 1.8903111280574194,
"children": {
"env_step": {
"total": 2062.180577988964,
"count": 76412,
"self": 1585.9781793540892,
"children": {
"SubprocessEnvManager._take_step": {
"total": 475.11723950589436,
"count": 76412,
"self": 13.271666741895388,
"children": {
"TorchPolicy.evaluate": {
"total": 461.845572763999,
"count": 151622,
"self": 461.845572763999
}
}
},
"workers": {
"total": 1.0851591289806493,
"count": 76411,
"self": 0.0,
"children": {
"worker_root": {
"total": 2524.2408613650623,
"count": 76411,
"is_parallel": true,
"self": 1188.8313308641118,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008904904000019087,
"count": 2,
"is_parallel": true,
"self": 0.0038840060000779886,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0050208979999410985,
"count": 8,
"is_parallel": true,
"self": 0.0050208979999410985
}
}
},
"UnityEnvironment.step": {
"total": 0.04253810400001612,
"count": 1,
"is_parallel": true,
"self": 0.0013427510000383336,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008961439999666254,
"count": 1,
"is_parallel": true,
"self": 0.0008961439999666254
},
"communicator.exchange": {
"total": 0.036317191000023286,
"count": 1,
"is_parallel": true,
"self": 0.036317191000023286
},
"steps_from_proto": {
"total": 0.003982017999987875,
"count": 2,
"is_parallel": true,
"self": 0.000678803999903721,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0033032140000841537,
"count": 8,
"is_parallel": true,
"self": 0.0033032140000841537
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1335.3972622239498,
"count": 76410,
"is_parallel": true,
"self": 84.99141936895694,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 58.67912028001746,
"count": 76410,
"is_parallel": true,
"self": 58.67912028001746
},
"communicator.exchange": {
"total": 926.05539542598,
"count": 76410,
"is_parallel": true,
"self": 926.05539542598
},
"steps_from_proto": {
"total": 265.6713271489956,
"count": 152820,
"is_parallel": true,
"self": 43.53513674702447,
"children": {
"_process_rank_one_or_two_observation": {
"total": 222.13619040197113,
"count": 611280,
"is_parallel": true,
"self": 222.13619040197113
}
}
}
}
},
"steps_from_proto": {
"total": 0.01226827700065769,
"count": 10,
"is_parallel": true,
"self": 0.0024873580007351848,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.009780918999922505,
"count": 40,
"is_parallel": true,
"self": 0.009780918999922505
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 459.13320429499674,
"count": 76411,
"self": 15.2067951220277,
"children": {
"process_trajectory": {
"total": 132.3577217459707,
"count": 76411,
"self": 131.73954964797065,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6181720980000591,
"count": 2,
"self": 0.6181720980000591
}
}
},
"_update_policy": {
"total": 311.56868742699834,
"count": 54,
"self": 184.2608421800016,
"children": {
"TorchPOCAOptimizer.update": {
"total": 127.30784524699675,
"count": 1629,
"self": 127.30784524699675
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5169998732744716e-06,
"count": 1,
"self": 1.5169998732744716e-06
},
"TrainerController._save_models": {
"total": 0.34463203999985126,
"count": 1,
"self": 0.0030709689999639522,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3415610709998873,
"count": 1,
"self": 0.3415610709998873
}
}
}
}
}
}
}