poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.572228193283081,
"min": 2.572228193283081,
"max": 3.295742988586426,
"count": 361
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 48975.2265625,
"min": 15986.3876953125,
"max": 144934.21875,
"count": 361
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 55.168539325842694,
"min": 51.5,
"max": 999.0,
"count": 361
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19640.0,
"min": 11988.0,
"max": 27356.0,
"count": 361
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1425.9484813966776,
"min": 1187.7525027713093,
"max": 1429.1515385220728,
"count": 304
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 253818.82968860862,
"min": 2375.5050055426186,
"max": 268680.4892421497,
"count": 304
},
"SoccerTwos.Step.mean": {
"value": 3609919.0,
"min": 9426.0,
"max": 3609919.0,
"count": 361
},
"SoccerTwos.Step.sum": {
"value": 3609919.0,
"min": 9426.0,
"max": 3609919.0,
"count": 361
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.04182106629014015,
"min": -0.060071639716625214,
"max": 0.26226282119750977,
"count": 361
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 7.485970973968506,
"min": -9.731605529785156,
"max": 31.471538543701172,
"count": 361
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.051271967589855194,
"min": -0.07667960971593857,
"max": 0.24757982790470123,
"count": 361
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 9.177681922912598,
"min": -12.422097206115723,
"max": 29.709579467773438,
"count": 361
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 361
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 361
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.06473742916597335,
"min": -0.4782000019082001,
"max": 0.4747310363013169,
"count": 361
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 11.587999820709229,
"min": -39.345200300216675,
"max": 56.74600076675415,
"count": 361
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.06473742916597335,
"min": -0.4782000019082001,
"max": 0.4747310363013169,
"count": 361
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 11.587999820709229,
"min": -39.345200300216675,
"max": 56.74600076675415,
"count": 361
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 361
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 361
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01921621749934275,
"min": 0.010402970989707683,
"max": 0.024901417353006158,
"count": 169
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01921621749934275,
"min": 0.010402970989707683,
"max": 0.024901417353006158,
"count": 169
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09100064014395078,
"min": 4.047437763195679e-06,
"max": 0.09100064014395078,
"count": 169
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09100064014395078,
"min": 4.047437763195679e-06,
"max": 0.09100064014395078,
"count": 169
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09380210836728414,
"min": 7.709957981205661e-06,
"max": 0.09406529093782107,
"count": 169
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09380210836728414,
"min": 7.709957981205661e-06,
"max": 0.09406529093782107,
"count": 169
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 169
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 169
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 169
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 169
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 169
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 169
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699775325",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1699783692"
},
"total": 8367.639902794,
"count": 1,
"self": 0.164090006999686,
"children": {
"run_training.setup": {
"total": 0.0502978330000019,
"count": 1,
"self": 0.0502978330000019
},
"TrainerController.start_learning": {
"total": 8367.425514954,
"count": 1,
"self": 5.847846895956536,
"children": {
"TrainerController._reset_env": {
"total": 10.106135658999648,
"count": 19,
"self": 10.106135658999648
},
"TrainerController.advance": {
"total": 8351.208254458043,
"count": 237557,
"self": 6.343985027857343,
"children": {
"env_step": {
"total": 6827.169975015222,
"count": 237557,
"self": 5299.57868867819,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1524.127934947996,
"count": 237557,
"self": 41.92380931203252,
"children": {
"TorchPolicy.evaluate": {
"total": 1482.2041256359635,
"count": 464462,
"self": 1482.2041256359635
}
}
},
"workers": {
"total": 3.463351389036376,
"count": 237556,
"self": 0.0,
"children": {
"worker_root": {
"total": 8352.699223156349,
"count": 237556,
"is_parallel": true,
"self": 3884.3023343453197,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005055136999999377,
"count": 2,
"is_parallel": true,
"self": 0.0029939379999461835,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020611990000531932,
"count": 8,
"is_parallel": true,
"self": 0.0020611990000531932
}
}
},
"UnityEnvironment.step": {
"total": 0.06385541000000217,
"count": 1,
"is_parallel": true,
"self": 0.001181242000001248,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0007416850000083741,
"count": 1,
"is_parallel": true,
"self": 0.0007416850000083741
},
"communicator.exchange": {
"total": 0.058506405000002815,
"count": 1,
"is_parallel": true,
"self": 0.058506405000002815
},
"steps_from_proto": {
"total": 0.0034260779999897295,
"count": 2,
"is_parallel": true,
"self": 0.0005890350000470335,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002837042999942696,
"count": 8,
"is_parallel": true,
"self": 0.002837042999942696
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4468.347291175031,
"count": 237555,
"is_parallel": true,
"self": 274.6221968518403,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 184.92352797006362,
"count": 237555,
"is_parallel": true,
"self": 184.92352797006362
},
"communicator.exchange": {
"total": 3140.041087633084,
"count": 237555,
"is_parallel": true,
"self": 3140.041087633084
},
"steps_from_proto": {
"total": 868.7604787200435,
"count": 475110,
"is_parallel": true,
"self": 139.62052761063637,
"children": {
"_process_rank_one_or_two_observation": {
"total": 729.1399511094071,
"count": 1900440,
"is_parallel": true,
"self": 729.1399511094071
}
}
}
}
},
"steps_from_proto": {
"total": 0.049597635998338774,
"count": 36,
"is_parallel": true,
"self": 0.009491506991707865,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04010612900663091,
"count": 144,
"is_parallel": true,
"self": 0.04010612900663091
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1517.6942944149628,
"count": 237556,
"self": 49.94889576212063,
"children": {
"process_trajectory": {
"total": 488.5553028678451,
"count": 237556,
"self": 486.8796415738437,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6756612940014293,
"count": 7,
"self": 1.6756612940014293
}
}
},
"_update_policy": {
"total": 979.1900957849971,
"count": 169,
"self": 591.2771232890102,
"children": {
"TorchPOCAOptimizer.update": {
"total": 387.9129724959869,
"count": 5073,
"self": 387.9129724959869
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3860008039046079e-06,
"count": 1,
"self": 1.3860008039046079e-06
},
"TrainerController._save_models": {
"total": 0.26327655500062974,
"count": 1,
"self": 0.002144932999726734,
"children": {
"RLTrainer._checkpoint": {
"total": 0.261131622000903,
"count": 1,
"self": 0.261131622000903
}
}
}
}
}
}
}
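
Note (not part of the run log): the JSON above is the ML-Agents timers file, with a "gauges" section of training summaries (each with value/min/max/count) and a hierarchical timer tree (total/count/self/children). A minimal sketch of reading it back in Python follows; the relative path "run_logs/timers.json" is an assumption based on the repository layout shown above.

import json

# Load this timers.json (path assumed from the repo layout) and print the
# gauge summaries that ML-Agents recorded during training.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# Total wall-clock time of the run in seconds, from the top of the timer tree.
print("total seconds:", timers["total"])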