{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5075193643569946,
"min": 1.4223438501358032,
"max": 3.29575777053833,
"count": 1684
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 30536.3125,
"min": 21724.7421875,
"max": 172076.078125,
"count": 1684
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 49.628865979381445,
"min": 36.97727272727273,
"max": 999.0,
"count": 1684
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19256.0,
"min": 16400.0,
"max": 24280.0,
"count": 1684
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1548.8406443673402,
"min": 1202.9386188779174,
"max": 1588.4355426754382,
"count": 1616
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 300475.085007264,
"min": 2405.877237755835,
"max": 404247.29001054686,
"count": 1616
},
"SoccerTwos.Step.mean": {
"value": 16839944.0,
"min": 9162.0,
"max": 16839944.0,
"count": 1684
},
"SoccerTwos.Step.sum": {
"value": 16839944.0,
"min": 9162.0,
"max": 16839944.0,
"count": 1684
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.010396160185337067,
"min": -0.13093166053295135,
"max": 0.16393695771694183,
"count": 1684
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 2.016855001449585,
"min": -24.76071548461914,
"max": 27.682390213012695,
"count": 1684
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.012765331193804741,
"min": -0.12848448753356934,
"max": 0.18016555905342102,
"count": 1684
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 2.4764742851257324,
"min": -25.138809204101562,
"max": 27.552812576293945,
"count": 1684
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1684
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1684
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.02118144330290175,
"min": -0.6553411764257094,
"max": 0.46176469939596515,
"count": 1684
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 4.1092000007629395,
"min": -56.14080011844635,
"max": 59.756800055503845,
"count": 1684
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.02118144330290175,
"min": -0.6553411764257094,
"max": 0.46176469939596515,
"count": 1684
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 4.1092000007629395,
"min": -56.14080011844635,
"max": 59.756800055503845,
"count": 1684
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1684
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1684
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.020544298365712164,
"min": 0.00970811143379251,
"max": 0.025192880642134697,
"count": 809
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.020544298365712164,
"min": 0.00970811143379251,
"max": 0.025192880642134697,
"count": 809
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11354104181130727,
"min": 8.301274097751351e-07,
"max": 0.12650607749819756,
"count": 809
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11354104181130727,
"min": 8.301274097751351e-07,
"max": 0.12650607749819756,
"count": 809
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11532666459679604,
"min": 9.456767202209449e-07,
"max": 0.12839160487055779,
"count": 809
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11532666459679604,
"min": 9.456767202209449e-07,
"max": 0.12839160487055779,
"count": 809
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 809
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 809
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 809
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 809
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 809
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 809
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1729838318",
"python_version": "3.10.12 (main, Jul 5 2023, 15:02:25) [Clang 14.0.6 ]",
"command_line_arguments": "/opt/anaconda3/envs/rl3/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0",
"numpy_version": "1.23.5",
"end_time_seconds": "1729885903"
},
"total": 47583.77235337498,
"count": 1,
"self": 0.15778812498319894,
"children": {
"run_training.setup": {
"total": 0.014746833010576665,
"count": 1,
"self": 0.014746833010576665
},
"TrainerController.start_learning": {
"total": 47583.59981841699,
"count": 1,
"self": 10.320242145215161,
"children": {
"TrainerController._reset_env": {
"total": 5.96240825462155,
"count": 85,
"self": 5.96240825462155
},
"TrainerController.advance": {
"total": 47567.240067725186,
"count": 1155809,
"self": 8.499234364717267,
"children": {
"env_step": {
"total": 38672.649586675456,
"count": 1155809,
"self": 37406.00112313824,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1260.434232891188,
"count": 1155809,
"self": 39.95918971253559,
"children": {
"TorchPolicy.evaluate": {
"total": 1220.4750431786524,
"count": 2128420,
"self": 1220.4750431786524
}
}
},
"workers": {
"total": 6.214230646030046,
"count": 1155809,
"self": 0.0,
"children": {
"worker_root": {
"total": 47565.91652237286,
"count": 1155809,
"is_parallel": true,
"self": 11339.841364476248,
"children": {
"steps_from_proto": {
"total": 0.09973392367828637,
"count": 170,
"is_parallel": true,
"self": 0.012506206054240465,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.08722771762404591,
"count": 680,
"is_parallel": true,
"self": 0.08722771762404591
}
}
},
"UnityEnvironment.step": {
"total": 36225.97542397294,
"count": 1155809,
"is_parallel": true,
"self": 100.28258109081071,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 645.6035189076792,
"count": 1155809,
"is_parallel": true,
"self": 645.6035189076792
},
"communicator.exchange": {
"total": 34217.75499016093,
"count": 1155809,
"is_parallel": true,
"self": 34217.75499016093
},
"steps_from_proto": {
"total": 1262.3343338135164,
"count": 2311618,
"is_parallel": true,
"self": 148.4430766660953,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1113.8912571474211,
"count": 9246472,
"is_parallel": true,
"self": 1113.8912571474211
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8886.091246685013,
"count": 1155809,
"self": 74.03028768778313,
"children": {
"process_trajectory": {
"total": 1645.7643202384934,
"count": 1155809,
"self": 1642.761222984409,
"children": {
"RLTrainer._checkpoint": {
"total": 3.003097254084423,
"count": 33,
"self": 3.003097254084423
}
}
},
"_update_policy": {
"total": 7166.296638758737,
"count": 810,
"self": 825.5450803557178,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6340.751558403019,
"count": 24300,
"self": 6340.751558403019
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.420297384262085e-07,
"count": 1,
"self": 5.420297384262085e-07
},
"TrainerController._save_models": {
"total": 0.07709974993485957,
"count": 1,
"self": 0.0006019159918650985,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07649783394299448,
"count": 1,
"self": 0.07649783394299448
}
}
}
}
}
}
}