{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.2478506565093994,
"min": 2.2357864379882812,
"max": 2.4217634201049805,
"count": 67
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 91064.921875,
"min": 38863.625,
"max": 101055.34375,
"count": 67
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 42.06086956521739,
"min": 40.87777777777778,
"max": 53.73770491803279,
"count": 67
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 38696.0,
"min": 14716.0,
"max": 39844.0,
"count": 67
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1197.0246153318647,
"min": 1176.5456932491104,
"max": 1227.0267909219529,
"count": 67
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 550631.3230526578,
"min": 213921.57633360883,
"max": 572177.7661286313,
"count": 67
},
"SoccerTwos.Step.mean": {
"value": 6879981.0,
"min": 5559962.0,
"max": 6879981.0,
"count": 67
},
"SoccerTwos.Step.sum": {
"value": 6879981.0,
"min": 5559962.0,
"max": 6879981.0,
"count": 67
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.018729526549577713,
"min": -0.041034113615751266,
"max": 0.053876958787441254,
"count": 67
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 8.615582466125488,
"min": -17.23432731628418,
"max": 22.52056884765625,
"count": 67
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.01690117083489895,
"min": -0.04584740102291107,
"max": 0.0544377937912941,
"count": 67
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 7.774538993835449,
"min": -19.255908966064453,
"max": 22.75499725341797,
"count": 67
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 67
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 67
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.029700001426365065,
"min": -0.16277437106422757,
"max": 0.1755102143718058,
"count": 67
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 13.66200065612793,
"min": -71.13240015506744,
"max": 73.88980025053024,
"count": 67
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.029700001426365065,
"min": -0.16277437106422757,
"max": 0.1755102143718058,
"count": 67
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 13.66200065612793,
"min": -71.13240015506744,
"max": 73.88980025053024,
"count": 67
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 67
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 67
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018331647620863804,
"min": 0.01613544496940449,
"max": 0.01887765062280232,
"count": 12
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018331647620863804,
"min": 0.01613544496940449,
"max": 0.01887765062280232,
"count": 12
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.06169401177763939,
"min": 0.05694894982874393,
"max": 0.06169401177763939,
"count": 12
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.06169401177763939,
"min": 0.05694894982874393,
"max": 0.06169401177763939,
"count": 12
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.06926548351347446,
"min": 0.0646216332167387,
"max": 0.06926548351347446,
"count": 12
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.06926548351347446,
"min": 0.0646216332167387,
"max": 0.06926548351347446,
"count": 12
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.00016086901782621,
"min": 0.00016086901782621,
"max": 0.00021725030654994994,
"count": 12
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.00016086901782621,
"min": 0.00016086901782621,
"max": 0.00021725030654994994,
"count": 12
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.13217378999999996,
"min": 0.13217378999999996,
"max": 0.14345004999999997,
"count": 12
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.13217378999999996,
"min": 0.13217378999999996,
"max": 0.14345004999999997,
"count": 12
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.0016154721210000003,
"min": 0.0016154721210000003,
"max": 0.0021781574949999998,
"count": 12
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.0016154721210000003,
"min": 0.0016154721210000003,
"max": 0.0021781574949999998,
"count": 12
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1733230749",
"python_version": "3.10.12 (main, Jul 5 2023, 15:34:07) [Clang 14.0.6 ]",
"command_line_arguments": "/Users/mango/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2",
"numpy_version": "1.23.5",
"end_time_seconds": "1733240280"
},
"total": 9530.95424354,
"count": 1,
"self": 1.4487167999977828,
"children": {
"run_training.setup": {
"total": 0.04115357099999528,
"count": 1,
"self": 0.04115357099999528
},
"TrainerController.start_learning": {
"total": 9529.464373169001,
"count": 1,
"self": 2.0527336289014784,
"children": {
"TrainerController._reset_env": {
"total": 4.559869480998714,
"count": 6,
"self": 4.559869480998714
},
"TrainerController.advance": {
"total": 9522.7014330811,
"count": 94878,
"self": 1.9191404055654857,
"children": {
"env_step": {
"total": 7025.0258754607985,
"count": 94878,
"self": 6790.529097738346,
"children": {
"SubprocessEnvManager._take_step": {
"total": 233.28466942114665,
"count": 94878,
"self": 9.769179835016075,
"children": {
"TorchPolicy.evaluate": {
"total": 223.51548958613057,
"count": 167620,
"self": 223.51548958613057
}
}
},
"workers": {
"total": 1.2121083013062162,
"count": 94877,
"self": 0.0,
"children": {
"worker_root": {
"total": 9522.64279122905,
"count": 94877,
"is_parallel": true,
"self": 2940.848590467828,
"children": {
"steps_from_proto": {
"total": 0.016203271999074786,
"count": 12,
"is_parallel": true,
"self": 0.0022021899931132793,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.014001082005961507,
"count": 48,
"is_parallel": true,
"self": 0.014001082005961507
}
}
},
"UnityEnvironment.step": {
"total": 6581.777997489222,
"count": 94877,
"is_parallel": true,
"self": 21.93422803523572,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 134.36988261496208,
"count": 94877,
"is_parallel": true,
"self": 134.36988261496208
},
"communicator.exchange": {
"total": 6176.115223347013,
"count": 94877,
"is_parallel": true,
"self": 6176.115223347013
},
"steps_from_proto": {
"total": 249.35866349201115,
"count": 189754,
"is_parallel": true,
"self": 33.914368653379825,
"children": {
"_process_rank_one_or_two_observation": {
"total": 215.44429483863132,
"count": 759016,
"is_parallel": true,
"self": 215.44429483863132
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2495.756417214736,
"count": 94877,
"self": 12.74757487437364,
"children": {
"process_trajectory": {
"total": 276.40217168436266,
"count": 94877,
"self": 276.0842088503623,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31796283400035463,
"count": 2,
"self": 0.31796283400035463
}
}
},
"_update_policy": {
"total": 2206.6066706559996,
"count": 13,
"self": 303.31123607103837,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1903.2954345849612,
"count": 3250,
"self": 1903.2954345849612
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3030003174208105e-06,
"count": 1,
"self": 1.3030003174208105e-06
},
"TrainerController._save_models": {
"total": 0.15033567499995115,
"count": 1,
"self": 0.0011441690003266558,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1491915059996245,
"count": 1,
"self": 0.1491915059996245
}
}
}
}
}
}
}