{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1249186992645264,
"min": 3.1023879051208496,
"max": 3.2957534790039062,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 28599.255859375,
"min": 15383.267578125,
"max": 156816.921875,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 832.1666666666666,
"min": 272.77777777777777,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19972.0,
"min": 5156.0,
"max": 28748.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1218.463314003117,
"min": 1194.9429334358551,
"max": 1225.964117543023,
"count": 345
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 9747.706512024935,
"min": 2389.8858668717103,
"max": 39090.16441065677,
"count": 345
},
"SoccerTwos.Step.mean": {
"value": 4999098.0,
"min": 9394.0,
"max": 4999098.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999098.0,
"min": 9394.0,
"max": 4999098.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.008336874656379223,
"min": -0.014455064199864864,
"max": 0.027775796130299568,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.10004249215126038,
"min": -0.39028674364089966,
"max": 0.41663694381713867,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.008533528074622154,
"min": -0.015282760374248028,
"max": 0.027741296216845512,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.10240233689546585,
"min": -0.3585999310016632,
"max": 0.41611945629119873,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.20611666639645895,
"min": -0.7223826076673425,
"max": 0.44031111233764225,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.4733999967575073,
"min": -16.614799976348877,
"max": 10.413199961185455,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.20611666639645895,
"min": -0.7223826076673425,
"max": 0.44031111233764225,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.4733999967575073,
"min": -16.614799976348877,
"max": 10.413199961185455,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015365945894639783,
"min": 0.010205141287830581,
"max": 0.023643144588762273,
"count": 232
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015365945894639783,
"min": 0.010205141287830581,
"max": 0.023643144588762273,
"count": 232
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0076258563591788215,
"min": 2.4769114818449604e-08,
"max": 0.00910759853820006,
"count": 232
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0076258563591788215,
"min": 2.4769114818449604e-08,
"max": 0.00910759853820006,
"count": 232
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.007699296902865171,
"min": 1.8153060565850864e-08,
"max": 0.009323406747231882,
"count": 232
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.007699296902865171,
"min": 1.8153060565850864e-08,
"max": 0.009323406747231882,
"count": 232
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 232
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 232
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 232
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 232
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 232
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 232
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1723376269",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/home/arunachalam/rl-agents/agents-env/bin/mlagents-learn ./config/Soccer2player.yaml --env=./training-env-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1723390301"
},
"total": 14032.629968817004,
"count": 1,
"self": 2.1376076599990483,
"children": {
"run_training.setup": {
"total": 0.009511717002169462,
"count": 1,
"self": 0.009511717002169462
},
"TrainerController.start_learning": {
"total": 14030.482849440003,
"count": 1,
"self": 6.223035381401132,
"children": {
"TrainerController._reset_env": {
"total": 5.502213287010818,
"count": 25,
"self": 5.502213287010818
},
"TrainerController.advance": {
"total": 14018.386988909595,
"count": 325177,
"self": 7.445543163314142,
"children": {
"env_step": {
"total": 5811.877426455721,
"count": 325177,
"self": 4525.175247842737,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1282.1815960891036,
"count": 325177,
"self": 50.21678608264483,
"children": {
"TorchPolicy.evaluate": {
"total": 1231.9648100064587,
"count": 645138,
"self": 1231.9648100064587
}
}
},
"workers": {
"total": 4.520582523880876,
"count": 325177,
"self": 0.0,
"children": {
"worker_root": {
"total": 14018.839571044471,
"count": 325177,
"is_parallel": true,
"self": 10324.883514666533,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017830469987529796,
"count": 2,
"is_parallel": true,
"self": 0.0004381180006021168,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013449289981508628,
"count": 8,
"is_parallel": true,
"self": 0.0013449289981508628
}
}
},
"UnityEnvironment.step": {
"total": 0.029157692999433493,
"count": 1,
"is_parallel": true,
"self": 0.0007788169968989678,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0016391909994126763,
"count": 1,
"is_parallel": true,
"self": 0.0016391909994126763
},
"communicator.exchange": {
"total": 0.02444483900035266,
"count": 1,
"is_parallel": true,
"self": 0.02444483900035266
},
"steps_from_proto": {
"total": 0.002294846002769191,
"count": 2,
"is_parallel": true,
"self": 0.0004378710036689881,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018569749991002027,
"count": 8,
"is_parallel": true,
"self": 0.0018569749991002027
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3693.9236704689356,
"count": 325176,
"is_parallel": true,
"self": 228.1241314480685,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 188.81007460401452,
"count": 325176,
"is_parallel": true,
"self": 188.81007460401452
},
"communicator.exchange": {
"total": 2613.135058785261,
"count": 325176,
"is_parallel": true,
"self": 2613.135058785261
},
"steps_from_proto": {
"total": 663.8544056315914,
"count": 650352,
"is_parallel": true,
"self": 126.36630100009279,
"children": {
"_process_rank_one_or_two_observation": {
"total": 537.4881046314986,
"count": 2601408,
"is_parallel": true,
"self": 537.4881046314986
}
}
}
}
},
"steps_from_proto": {
"total": 0.03238590900218696,
"count": 48,
"is_parallel": true,
"self": 0.006737916010024492,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.02564799299216247,
"count": 192,
"is_parallel": true,
"self": 0.02564799299216247
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8199.06401929056,
"count": 325177,
"self": 57.65157899947735,
"children": {
"process_trajectory": {
"total": 1019.5925448150774,
"count": 325177,
"self": 1013.7635504400787,
"children": {
"RLTrainer._checkpoint": {
"total": 5.828994374998729,
"count": 10,
"self": 5.828994374998729
}
}
},
"_update_policy": {
"total": 7121.819895476005,
"count": 232,
"self": 583.2135720981714,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6538.606323377833,
"count": 6969,
"self": 6538.606323377833
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.989947334863245e-07,
"count": 1,
"self": 7.989947334863245e-07
},
"TrainerController._save_models": {
"total": 0.3706110630009789,
"count": 1,
"self": 0.08323866900173016,
"children": {
"RLTrainer._checkpoint": {
"total": 0.28737239399924874,
"count": 1,
"self": 0.28737239399924874
}
}
}
}
}
}
}