poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4963933229446411,
"min": 1.408483624458313,
"max": 3.29571270942688,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 29353.251953125,
"min": 17898.99609375,
"max": 129690.0234375,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 64.85526315789474,
"min": 48.93,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19716.0,
"min": 16320.0,
"max": 23548.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1986.2290766666756,
"min": 1196.4713145206497,
"max": 2035.545787867456,
"count": 4718
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 301906.8196533347,
"min": 2395.5018840418998,
"max": 389188.2794481589,
"count": 4718
},
"SoccerTwos.Step.mean": {
"value": 49999988.0,
"min": 9760.0,
"max": 49999988.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999988.0,
"min": 9760.0,
"max": 49999988.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.01866314746439457,
"min": -0.14000248908996582,
"max": 0.12597665190696716,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.8367984294891357,
"min": -22.540401458740234,
"max": 16.092144012451172,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.018723903223872185,
"min": -0.14310425519943237,
"max": 0.1285843402147293,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.8460333347320557,
"min": -22.094587326049805,
"max": 16.1998348236084,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.03278157899254247,
"min": -0.6470588235294118,
"max": 0.5818533301353455,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -4.982800006866455,
"min": -62.61580002307892,
"max": 53.87600004673004,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.03278157899254247,
"min": -0.6470588235294118,
"max": 0.5818533301353455,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -4.982800006866455,
"min": -62.61580002307892,
"max": 53.87600004673004,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01672766050323844,
"min": 0.010218130899981285,
"max": 0.026053640067887802,
"count": 2409
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01672766050323844,
"min": 0.010218130899981285,
"max": 0.026053640067887802,
"count": 2409
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10420755222439766,
"min": 2.20016370307737e-09,
"max": 0.10751013110081355,
"count": 2409
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10420755222439766,
"min": 2.20016370307737e-09,
"max": 0.10751013110081355,
"count": 2409
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10512582957744598,
"min": 2.462296692466263e-09,
"max": 0.1082919495801131,
"count": 2409
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10512582957744598,
"min": 2.462296692466263e-09,
"max": 0.1082919495801131,
"count": 2409
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2409
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2409
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2409
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2409
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2409
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2409
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676069006",
"python_version": "3.9.16 (main, Jan 11 2023, 10:02:19) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/ivanchistyakov/Desktop/ml-agents/ml-agents/mlagents/trainers/learn.py ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1676391259"
},
"total": 322242.644030672,
"count": 1,
"self": 0.3279518609633669,
"children": {
"run_training.setup": {
"total": 0.016999580000000014,
"count": 1,
"self": 0.016999580000000014
},
"TrainerController.start_learning": {
"total": 322242.299079231,
"count": 1,
"self": 59.97265928611159,
"children": {
"TrainerController._reset_env": {
"total": 6.2347968669171685,
"count": 250,
"self": 6.2347968669171685
},
"TrainerController.advance": {
"total": 322175.92392550997,
"count": 3376647,
"self": 63.354187416203786,
"children": {
"env_step": {
"total": 42838.97478724331,
"count": 3376647,
"self": 34941.08387098866,
"children": {
"SubprocessEnvManager._take_step": {
"total": 7858.295024526432,
"count": 3376647,
"self": 333.92455918631913,
"children": {
"TorchPolicy.evaluate": {
"total": 7524.370465340113,
"count": 6305168,
"self": 7524.370465340113
}
}
},
"workers": {
"total": 39.59589172821464,
"count": 3376647,
"self": 0.0,
"children": {
"worker_root": {
"total": 322142.54228148673,
"count": 3376647,
"is_parallel": true,
"self": 294074.76907246886,
"children": {
"steps_from_proto": {
"total": 0.3860330660266218,
"count": 500,
"is_parallel": true,
"self": 0.0781055047419148,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.307927561284707,
"count": 2000,
"is_parallel": true,
"self": 0.307927561284707
}
}
},
"UnityEnvironment.step": {
"total": 28067.38717595182,
"count": 3376647,
"is_parallel": true,
"self": 1689.5224428774309,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1064.3330249926516,
"count": 3376647,
"is_parallel": true,
"self": 1064.3330249926516
},
"communicator.exchange": {
"total": 20361.388769087243,
"count": 3376647,
"is_parallel": true,
"self": 20361.388769087243
},
"steps_from_proto": {
"total": 4952.142938994494,
"count": 6753294,
"is_parallel": true,
"self": 1010.4682478903151,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3941.674691104179,
"count": 27013176,
"is_parallel": true,
"self": 3941.674691104179
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 279273.5949508505,
"count": 3376647,
"self": 435.3135163091356,
"children": {
"process_trajectory": {
"total": 12910.544843309937,
"count": 3376647,
"self": 12890.749638091116,
"children": {
"RLTrainer._checkpoint": {
"total": 19.79520521882091,
"count": 100,
"self": 19.79520521882091
}
}
},
"_update_policy": {
"total": 265927.73659123137,
"count": 2409,
"self": 6506.972498562565,
"children": {
"TorchPOCAOptimizer.update": {
"total": 259420.7640926688,
"count": 72270,
"self": 259420.7640926688
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.500057108700275e-07,
"count": 1,
"self": 7.500057108700275e-07
},
"TrainerController._save_models": {
"total": 0.16769681801088154,
"count": 1,
"self": 0.0011068930034525692,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16658992500742897,
"count": 1,
"self": 0.16658992500742897
}
}
}
}
}
}
}