{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.9110393524169922,
"min": 1.9110393524169922,
"max": 3.2957024574279785,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 37242.3359375,
"min": 24007.6796875,
"max": 135802.28125,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 88.42857142857143,
"min": 55.90909090909091,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19808.0,
"min": 15084.0,
"max": 26688.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1441.3043274880604,
"min": 1196.5416096826411,
"max": 1455.789326012529,
"count": 436
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 161426.08467866277,
"min": 2395.6211310752565,
"max": 244595.4684624814,
"count": 436
},
"SoccerTwos.Step.mean": {
"value": 4999944.0,
"min": 9714.0,
"max": 4999944.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999944.0,
"min": 9714.0,
"max": 4999944.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.0650494396686554,
"min": -0.12277369946241379,
"max": 0.2998342216014862,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 7.220487594604492,
"min": -16.083354949951172,
"max": 21.741439819335938,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0664365217089653,
"min": -0.12979534268379211,
"max": 0.2913816571235657,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 7.374454021453857,
"min": -17.003189086914062,
"max": 21.107614517211914,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.006140540312002371,
"min": -0.543624997138977,
"max": 0.586807015564358,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -0.6815999746322632,
"min": -50.15079987049103,
"max": 47.421999514102936,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.006140540312002371,
"min": -0.543624997138977,
"max": 0.586807015564358,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -0.6815999746322632,
"min": -50.15079987049103,
"max": 47.421999514102936,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.021432880698121153,
"min": 0.012810339027200825,
"max": 0.027839673003181815,
"count": 236
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.021432880698121153,
"min": 0.012810339027200825,
"max": 0.027839673003181815,
"count": 236
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.06592842318117619,
"min": 4.4571302339591055e-07,
"max": 0.08229981143493205,
"count": 236
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.06592842318117619,
"min": 4.4571302339591055e-07,
"max": 0.08229981143493205,
"count": 236
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.06978704802691936,
"min": 4.809593065147055e-07,
"max": 0.0842959958105348,
"count": 236
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.06978704802691936,
"min": 4.809593065147055e-07,
"max": 0.0842959958105348,
"count": 236
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0006,
"min": 0.0006,
"max": 0.0006000000000000001,
"count": 236
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0006,
"min": 0.0006,
"max": 0.0006000000000000001,
"count": 236
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.19999999999999996,
"min": 0.1999999999999999,
"max": 0.19999999999999996,
"count": 236
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.19999999999999996,
"min": 0.1999999999999999,
"max": 0.19999999999999996,
"count": 236
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 236
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 236
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675775588",
"python_version": "3.9.16 (main, Jan 11 2023, 16:16:36) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\paezd\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos-v1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1675837913"
},
"total": 62324.6388479,
"count": 1,
"self": 2.618733600000269,
"children": {
"run_training.setup": {
"total": 0.5052632999999993,
"count": 1,
"self": 0.5052632999999993
},
"TrainerController.start_learning": {
"total": 62321.514851,
"count": 1,
"self": 14.648648999776924,
"children": {
"TrainerController._reset_env": {
"total": 20.56799130001208,
"count": 25,
"self": 20.56799130001208
},
"TrainerController.advance": {
"total": 62285.91002610021,
"count": 331538,
"self": 14.970814998530841,
"children": {
"env_step": {
"total": 11368.589502499704,
"count": 331538,
"self": 8574.913150200731,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2784.000924300136,
"count": 331538,
"self": 101.00345810150702,
"children": {
"TorchPolicy.evaluate": {
"total": 2682.997466198629,
"count": 637834,
"self": 2682.997466198629
}
}
},
"workers": {
"total": 9.67542799883742,
"count": 331538,
"self": 0.0,
"children": {
"worker_root": {
"total": 62281.79811689989,
"count": 331538,
"is_parallel": true,
"self": 55473.7478587975,
"children": {
"steps_from_proto": {
"total": 0.09758040000690471,
"count": 50,
"is_parallel": true,
"self": 0.019795399977599715,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.077785000029305,
"count": 200,
"is_parallel": true,
"self": 0.077785000029305
}
}
},
"UnityEnvironment.step": {
"total": 6807.95267770238,
"count": 331538,
"is_parallel": true,
"self": 435.2586193040679,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 383.27107130022813,
"count": 331538,
"is_parallel": true,
"self": 383.27107130022813
},
"communicator.exchange": {
"total": 4686.934170700618,
"count": 331538,
"is_parallel": true,
"self": 4686.934170700618
},
"steps_from_proto": {
"total": 1302.488816397466,
"count": 663076,
"is_parallel": true,
"self": 257.52965970016794,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1044.959156697298,
"count": 2652304,
"is_parallel": true,
"self": 1044.959156697298
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 50902.34970860197,
"count": 331538,
"self": 101.50628510098613,
"children": {
"process_trajectory": {
"total": 12730.84208630096,
"count": 331538,
"self": 12727.872671500954,
"children": {
"RLTrainer._checkpoint": {
"total": 2.9694148000053247,
"count": 10,
"self": 2.9694148000053247
}
}
},
"_update_policy": {
"total": 38070.001337200025,
"count": 236,
"self": 2217.889489600646,
"children": {
"TorchPOCAOptimizer.update": {
"total": 35852.11184759938,
"count": 11805,
"self": 35852.11184759938
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.7999998312443495e-06,
"count": 1,
"self": 3.7999998312443495e-06
},
"TrainerController._save_models": {
"total": 0.3881808000005549,
"count": 1,
"self": 0.06439210000098683,
"children": {
"RLTrainer._checkpoint": {
"total": 0.32378869999956805,
"count": 1,
"self": 0.32378869999956805
}
}
}
}
}
}
}