poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.165266990661621,
"min": 3.1445515155792236,
"max": 3.2956528663635254,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 45478.5546875,
"min": 43273.23046875,
"max": 116318.65625,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 617.625,
"min": 305.52941176470586,
"max": 999.0,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19764.0,
"min": 12716.0,
"max": 28120.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1198.8048364886927,
"min": 1192.7861744586792,
"max": 1203.6383285599431,
"count": 49
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 11988.048364886927,
"min": 2390.382372402381,
"max": 31132.04478670979,
"count": 49
},
"SoccerTwos.Step.mean": {
"value": 499500.0,
"min": 9078.0,
"max": 499500.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 499500.0,
"min": 9078.0,
"max": 499500.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0029209901113063097,
"min": -0.057711124420166016,
"max": 0.009688904508948326,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.046735841780900955,
"min": -0.9233206510543823,
"max": 0.3294227421283722,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0008710850961506367,
"min": -0.05763240158557892,
"max": 0.010258274152874947,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.013937361538410187,
"min": -0.9221060872077942,
"max": 0.3111327588558197,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.059425003826618195,
"min": -0.45454545454545453,
"max": 0.2604222165213691,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -0.9508000612258911,
"min": -6.0,
"max": 5.899199843406677,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.059425003826618195,
"min": -0.45454545454545453,
"max": 0.2604222165213691,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -0.9508000612258911,
"min": -6.0,
"max": 5.899199843406677,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.021989391966758014,
"min": 0.021314931526042833,
"max": 0.028781463496852667,
"count": 23
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.021989391966758014,
"min": 0.021314931526042833,
"max": 0.028781463496852667,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.005325466050722059,
"min": 7.7253093214343e-05,
"max": 0.005325466050722059,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.005325466050722059,
"min": 7.7253093214343e-05,
"max": 0.005325466050722059,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.005394036829885509,
"min": 7.662698961136508e-05,
"max": 0.005394036829885509,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.005394036829885509,
"min": 7.662698961136508e-05,
"max": 0.005394036829885509,
"count": 23
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003000000000000002,
"min": 0.0003000000000000001,
"max": 0.0003000000000000002,
"count": 23
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003000000000000002,
"min": 0.0003000000000000001,
"max": 0.0003000000000000002,
"count": 23
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.1999999999999999,
"min": 0.1999999999999999,
"max": 0.19999999999999993,
"count": 23
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.1999999999999999,
"min": 0.1999999999999999,
"max": 0.19999999999999993,
"count": 23
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677770315",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:38:11) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/angelinux/miniforge3/envs/deep-rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1677771041"
},
"total": 726.215499,
"count": 1,
"self": 0.22824949899995772,
"children": {
"run_training.setup": {
"total": 0.07520620899999919,
"count": 1,
"self": 0.07520620899999919
},
"TrainerController.start_learning": {
"total": 725.912043292,
"count": 1,
"self": 0.3577547940047907,
"children": {
"TrainerController._reset_env": {
"total": 2.336004541000035,
"count": 13,
"self": 2.336004541000035
},
"TrainerController.advance": {
"total": 723.1292247899952,
"count": 36549,
"self": 0.39230298800100627,
"children": {
"env_step": {
"total": 378.1574145819987,
"count": 36549,
"self": 318.1327852690006,
"children": {
"SubprocessEnvManager._take_step": {
"total": 59.750773112001944,
"count": 36549,
"self": 1.8234825619938348,
"children": {
"TorchPolicy.evaluate": {
"total": 57.92729055000811,
"count": 72448,
"self": 57.92729055000811
}
}
},
"workers": {
"total": 0.27385620099615515,
"count": 36549,
"self": 0.0,
"children": {
"worker_root": {
"total": 722.9064067609972,
"count": 36549,
"is_parallel": true,
"self": 457.79318997699454,
"children": {
"steps_from_proto": {
"total": 0.011790787999841346,
"count": 26,
"is_parallel": true,
"self": 0.002134415999605066,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00965637200023628,
"count": 104,
"is_parallel": true,
"self": 0.00965637200023628
}
}
},
"UnityEnvironment.step": {
"total": 265.1014259960029,
"count": 36549,
"is_parallel": true,
"self": 14.808606606005554,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.797140993001641,
"count": 36549,
"is_parallel": true,
"self": 7.797140993001641
},
"communicator.exchange": {
"total": 207.9217918940034,
"count": 36549,
"is_parallel": true,
"self": 207.9217918940034
},
"steps_from_proto": {
"total": 34.57388650299227,
"count": 73098,
"is_parallel": true,
"self": 5.897746646961849,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.676139856030424,
"count": 292392,
"is_parallel": true,
"self": 28.676139856030424
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 344.5795072199955,
"count": 36549,
"self": 3.398562609986527,
"children": {
"process_trajectory": {
"total": 50.93060661000922,
"count": 36549,
"self": 50.81889890200918,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11170770800003993,
"count": 1,
"self": 0.11170770800003993
}
}
},
"_update_policy": {
"total": 290.2503379999997,
"count": 23,
"self": 42.781863164000924,
"children": {
"TorchPOCAOptimizer.update": {
"total": 247.4684748359988,
"count": 1401,
"self": 247.4684748359988
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.330000026835478e-07,
"count": 1,
"self": 3.330000026835478e-07
},
"TrainerController._save_models": {
"total": 0.08905883399995673,
"count": 1,
"self": 0.0007829590000483222,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08827587499990841,
"count": 1,
"self": 0.08827587499990841
}
}
}
}
}
}
}