newpush / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.4995028972625732,
"min": 2.4995028972625732,
"max": 3.2957677841186523,
"count": 200
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 47830.48828125,
"min": 22434.65625,
"max": 134064.078125,
"count": 200
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 60.91463414634146,
"min": 55.30337078651685,
"max": 999.0,
"count": 200
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 9692.0,
"max": 31600.0,
"count": 200
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1426.1425062319192,
"min": 1186.7099008978441,
"max": 1426.1425062319192,
"count": 187
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 233887.37102203476,
"min": 2373.4198017956883,
"max": 251318.45984006597,
"count": 187
},
"SoccerTwos.Step.mean": {
"value": 1999972.0,
"min": 9816.0,
"max": 1999972.0,
"count": 200
},
"SoccerTwos.Step.sum": {
"value": 1999972.0,
"min": 9816.0,
"max": 1999972.0,
"count": 200
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.019718406721949577,
"min": -0.10495129972696304,
"max": 0.1504417061805725,
"count": 200
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.2141003608703613,
"min": -6.748437881469727,
"max": 23.16802215576172,
"count": 200
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.012798120267689228,
"min": -0.10504809767007828,
"max": 0.1550382673740387,
"count": 200
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.0860936641693115,
"min": -6.461188316345215,
"max": 23.875892639160156,
"count": 200
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.1430552144723436,
"min": -0.572771430015564,
"max": 0.5919840002059936,
"count": 200
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -23.317999958992004,
"min": -32.835999846458435,
"max": 47.9463996887207,
"count": 200
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.1430552144723436,
"min": -0.572771430015564,
"max": 0.5919840002059936,
"count": 200
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -23.317999958992004,
"min": -32.835999846458435,
"max": 47.9463996887207,
"count": 200
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.0176091943285428,
"min": 0.0112602780466356,
"max": 0.022041657188674436,
"count": 94
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.0176091943285428,
"min": 0.0112602780466356,
"max": 0.022041657188674436,
"count": 94
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08248549650112788,
"min": 4.580195563903544e-05,
"max": 0.08429269219438235,
"count": 94
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08248549650112788,
"min": 4.580195563903544e-05,
"max": 0.08429269219438235,
"count": 94
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08579051742951076,
"min": 4.443469200244484e-05,
"max": 0.08590115283926328,
"count": 94
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08579051742951076,
"min": 4.443469200244484e-05,
"max": 0.08590115283926328,
"count": 94
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 94
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 94
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 94
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 94
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 94
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 94
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716145712",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\HP\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn C:/ml-agents-develop/config/poca/SoccerTwos.yaml --env=C:/ml-agents-develop/training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos0 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1716184795"
},
"total": 39101.74470540008,
"count": 1,
"self": 0.5904948002425954,
"children": {
"run_training.setup": {
"total": 0.1176830999320373,
"count": 1,
"self": 0.1176830999320373
},
"TrainerController.start_learning": {
"total": 39101.036527499906,
"count": 1,
"self": 6.388957287184894,
"children": {
"TrainerController._reset_env": {
"total": 7.239464999991469,
"count": 10,
"self": 7.239464999991469
},
"TrainerController.advance": {
"total": 39087.18162571266,
"count": 131498,
"self": 6.725487898453139,
"children": {
"env_step": {
"total": 10731.138303402462,
"count": 131498,
"self": 9572.302502990002,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1154.6130344928242,
"count": 131498,
"self": 40.648934202385135,
"children": {
"TorchPolicy.evaluate": {
"total": 1113.964100290439,
"count": 255240,
"self": 1113.964100290439
}
}
},
"workers": {
"total": 4.222765919636004,
"count": 131498,
"self": 0.0,
"children": {
"worker_root": {
"total": 39086.81556429353,
"count": 131498,
"is_parallel": true,
"self": 30335.635236100527,
"children": {
"steps_from_proto": {
"total": 0.03885550005361438,
"count": 20,
"is_parallel": true,
"self": 0.008186300401575863,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.030669199652038515,
"count": 80,
"is_parallel": true,
"self": 0.030669199652038515
}
}
},
"UnityEnvironment.step": {
"total": 8751.14147269295,
"count": 131498,
"is_parallel": true,
"self": 194.201460301294,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 206.00147668435238,
"count": 131498,
"is_parallel": true,
"self": 206.00147668435238
},
"communicator.exchange": {
"total": 7704.352328105248,
"count": 131498,
"is_parallel": true,
"self": 7704.352328105248
},
"steps_from_proto": {
"total": 646.5862076020567,
"count": 262996,
"is_parallel": true,
"self": 123.98597489856184,
"children": {
"_process_rank_one_or_two_observation": {
"total": 522.6002327034948,
"count": 1051984,
"is_parallel": true,
"self": 522.6002327034948
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 28349.317834411748,
"count": 131498,
"self": 38.030371587025,
"children": {
"process_trajectory": {
"total": 1026.3826382248662,
"count": 131498,
"self": 1025.269487624988,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1131505998782814,
"count": 4,
"self": 1.1131505998782814
}
}
},
"_update_policy": {
"total": 27284.904824599857,
"count": 94,
"self": 600.8709418027429,
"children": {
"TorchPOCAOptimizer.update": {
"total": 26684.033882797114,
"count": 2820,
"self": 26684.033882797114
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.700020559132099e-06,
"count": 1,
"self": 2.700020559132099e-06
},
"TrainerController._save_models": {
"total": 0.22647680004592985,
"count": 1,
"self": 0.0030365000711753964,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22344029997475445,
"count": 1,
"self": 0.22344029997475445
}
}
}
}
}
}
}