{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.3642840385437012,
"min": 1.3309460878372192,
"max": 3.2957050800323486,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 28333.451171875,
"min": 18824.236328125,
"max": 134818.796875,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 63.42857142857143,
"min": 38.685483870967744,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19536.0,
"min": 11764.0,
"max": 29004.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1476.7826459204307,
"min": 1187.359195178111,
"max": 1609.0609483598403,
"count": 4998
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 227424.52747174632,
"min": 2377.455822252568,
"max": 378674.29368815693,
"count": 4998
},
"SoccerTwos.Step.mean": {
"value": 49999948.0,
"min": 9518.0,
"max": 49999948.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999948.0,
"min": 9518.0,
"max": 49999948.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.03670903667807579,
"min": -0.14049525558948517,
"max": 0.17864368855953217,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -5.653191566467285,
"min": -27.11031150817871,
"max": 27.32752227783203,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.03587992489337921,
"min": -0.13726955652236938,
"max": 0.18451468646526337,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -5.525508403778076,
"min": -26.060300827026367,
"max": 26.73061752319336,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.004228573340874214,
"min": -0.5333333333333333,
"max": 0.5204064557629247,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -0.6512002944946289,
"min": -69.33999991416931,
"max": 70.19380009174347,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.004228573340874214,
"min": -0.5333333333333333,
"max": 0.5204064557629247,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -0.6512002944946289,
"min": -69.33999991416931,
"max": 70.19380009174347,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.012472246635782842,
"min": 0.010156642621101735,
"max": 0.025759396608918905,
"count": 2425
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.012472246635782842,
"min": 0.010156642621101735,
"max": 0.025759396608918905,
"count": 2425
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09122076332569122,
"min": 0.0007721815035135175,
"max": 0.12964768807093302,
"count": 2425
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09122076332569122,
"min": 0.0007721815035135175,
"max": 0.12964768807093302,
"count": 2425
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09219344134132067,
"min": 0.0007724896888248623,
"max": 0.1322729304432869,
"count": 2425
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09219344134132067,
"min": 0.0007724896888248623,
"max": 0.1322729304432869,
"count": 2425
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2425
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2425
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 2425
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 2425
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2425
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2425
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1712459075",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/jinghuan/.conda/envs/rl_py10/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1712645118"
},
"total": 186042.95730458386,
"count": 1,
"self": 0.42164613492786884,
"children": {
"run_training.setup": {
"total": 0.014359293971210718,
"count": 1,
"self": 0.014359293971210718
},
"TrainerController.start_learning": {
"total": 186042.52129915496,
"count": 1,
"self": 43.02053129579872,
"children": {
"TrainerController._reset_env": {
"total": 9.091848194599152,
"count": 250,
"self": 9.091848194599152
},
"TrainerController.advance": {
"total": 185990.2364280154,
"count": 3453061,
"self": 43.05949105974287,
"children": {
"env_step": {
"total": 160152.17570470786,
"count": 3453061,
"self": 97256.45733426698,
"children": {
"SubprocessEnvManager._take_step": {
"total": 62870.10368987499,
"count": 3453061,
"self": 324.4425852135755,
"children": {
"TorchPolicy.evaluate": {
"total": 62545.66110466141,
"count": 6278462,
"self": 62545.66110466141
}
}
},
"workers": {
"total": 25.61468056589365,
"count": 3453061,
"self": 0.0,
"children": {
"worker_root": {
"total": 185748.17667129915,
"count": 3453061,
"is_parallel": true,
"self": 97101.17736303573,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006627525668591261,
"count": 2,
"is_parallel": true,
"self": 0.0015474511310458183,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005080074537545443,
"count": 8,
"is_parallel": true,
"self": 0.005080074537545443
}
}
},
"UnityEnvironment.step": {
"total": 0.047877667006105185,
"count": 1,
"is_parallel": true,
"self": 0.001653832383453846,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0012996373698115349,
"count": 1,
"is_parallel": true,
"self": 0.0012996373698115349
},
"communicator.exchange": {
"total": 0.040173609275370836,
"count": 1,
"is_parallel": true,
"self": 0.040173609275370836
},
"steps_from_proto": {
"total": 0.0047505879774689674,
"count": 2,
"is_parallel": true,
"self": 0.0009069270454347134,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003843660932034254,
"count": 8,
"is_parallel": true,
"self": 0.003843660932034254
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 88645.84682184411,
"count": 3453060,
"is_parallel": true,
"self": 5246.0012119724415,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3738.271375231445,
"count": 3453060,
"is_parallel": true,
"self": 3738.271375231445
},
"communicator.exchange": {
"total": 64786.52102768235,
"count": 3453060,
"is_parallel": true,
"self": 64786.52102768235
},
"steps_from_proto": {
"total": 14875.053206957877,
"count": 6906120,
"is_parallel": true,
"self": 2806.225925695151,
"children": {
"_process_rank_one_or_two_observation": {
"total": 12068.827281262726,
"count": 27624480,
"is_parallel": true,
"self": 12068.827281262726
}
}
}
}
},
"steps_from_proto": {
"total": 1.1524864193052053,
"count": 498,
"is_parallel": true,
"self": 0.2139191417954862,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.9385672775097191,
"count": 1992,
"is_parallel": true,
"self": 0.9385672775097191
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 25795.0012322478,
"count": 3453061,
"self": 306.4589294404723,
"children": {
"process_trajectory": {
"total": 16522.97932873806,
"count": 3453061,
"self": 16501.552819521632,
"children": {
"RLTrainer._checkpoint": {
"total": 21.426509216427803,
"count": 100,
"self": 21.426509216427803
}
}
},
"_update_policy": {
"total": 8965.562974069268,
"count": 2425,
"self": 4947.9246844635345,
"children": {
"TorchPOCAOptimizer.update": {
"total": 4017.638289605733,
"count": 72753,
"self": 4017.638289605733
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.201467037200928e-07,
"count": 1,
"self": 9.201467037200928e-07
},
"TrainerController._save_models": {
"total": 0.17249072901904583,
"count": 1,
"self": 0.001132055651396513,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17135867336764932,
"count": 1,
"self": 0.17135867336764932
}
}
}
}
}
}
}