{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.318866491317749,
"min": 2.281125545501709,
"max": 3.295729160308838,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 44967.45703125,
"min": 21503.2421875,
"max": 114055.9375,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 54.61797752808989,
"min": 40.21311475409836,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19444.0,
"min": 11816.0,
"max": 28160.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1444.241350433996,
"min": 1188.6479357620524,
"max": 1452.95869128405,
"count": 457
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 257074.96037725132,
"min": 2377.2958715241048,
"max": 333759.33162617043,
"count": 457
},
"SoccerTwos.Step.mean": {
"value": 4999996.0,
"min": 9098.0,
"max": 4999996.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999996.0,
"min": 9098.0,
"max": 4999996.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.01115392055362463,
"min": -0.0824785977602005,
"max": 0.1251400262117386,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.9853978157043457,
"min": -17.485462188720703,
"max": 26.71692657470703,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0083291195333004,
"min": -0.08574127405881882,
"max": 0.12677337229251862,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.4825832843780518,
"min": -18.17715072631836,
"max": 27.129501342773438,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.025107865253191315,
"min": -0.5714285714285714,
"max": 0.3720830150370328,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 4.469200015068054,
"min": -45.066800117492676,
"max": 44.236600160598755,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.025107865253191315,
"min": -0.5714285714285714,
"max": 0.3720830150370328,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 4.469200015068054,
"min": -45.066800117492676,
"max": 44.236600160598755,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01991559787808607,
"min": 0.009667840918700677,
"max": 0.025319089300561853,
"count": 237
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01991559787808607,
"min": 0.009667840918700677,
"max": 0.025319089300561853,
"count": 237
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0988339307407538,
"min": 1.0261525767418789e-05,
"max": 0.10845122387011846,
"count": 237
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0988339307407538,
"min": 1.0261525767418789e-05,
"max": 0.10845122387011846,
"count": 237
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10113530655701955,
"min": 9.740641371536184e-06,
"max": 0.11117829009890556,
"count": 237
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10113530655701955,
"min": 9.740641371536184e-06,
"max": 0.11117829009890556,
"count": 237
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 237
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 237
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 237
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 237
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 237
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 237
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699119567",
"python_version": "3.10.12 (main, Nov 3 2023, 10:20:14) [Clang 15.0.0 (clang-1500.0.40.1)]",
"command_line_arguments": "/Users/mariana/.pyenv/versions/hf-mlagents/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.app --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1699139322"
},
"total": 19754.784502083,
"count": 1,
"self": 0.24437058297917247,
"children": {
"run_training.setup": {
"total": 0.02322391699999571,
"count": 1,
"self": 0.02322391699999571
},
"TrainerController.start_learning": {
"total": 19754.51690758302,
"count": 1,
"self": 4.011302950850222,
"children": {
"TrainerController._reset_env": {
"total": 4.623153710999759,
"count": 25,
"self": 4.623153710999759
},
"TrainerController.advance": {
"total": 19745.780157337198,
"count": 333456,
"self": 3.4934298787557054,
"children": {
"env_step": {
"total": 16002.080422994419,
"count": 333456,
"self": 15398.512569719896,
"children": {
"SubprocessEnvManager._take_step": {
"total": 601.0362062355562,
"count": 333456,
"self": 18.547572769370163,
"children": {
"TorchPolicy.evaluate": {
"total": 582.488633466186,
"count": 638470,
"self": 582.488633466186
}
}
},
"workers": {
"total": 2.5316470389661845,
"count": 333456,
"self": 0.0,
"children": {
"worker_root": {
"total": 19744.835674227506,
"count": 333456,
"is_parallel": true,
"self": 4873.171419823164,
"children": {
"steps_from_proto": {
"total": 0.03967812395421788,
"count": 50,
"is_parallel": true,
"self": 0.004709505650680512,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03496861830353737,
"count": 200,
"is_parallel": true,
"self": 0.03496861830353737
}
}
},
"UnityEnvironment.step": {
"total": 14871.624576280388,
"count": 333456,
"is_parallel": true,
"self": 40.67991027238895,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 260.0782363534381,
"count": 333456,
"is_parallel": true,
"self": 260.0782363534381
},
"communicator.exchange": {
"total": 14046.592645224591,
"count": 333456,
"is_parallel": true,
"self": 14046.592645224591
},
"steps_from_proto": {
"total": 524.27378442997,
"count": 666912,
"is_parallel": true,
"self": 60.76829427227494,
"children": {
"_process_rank_one_or_two_observation": {
"total": 463.505490157695,
"count": 2667648,
"is_parallel": true,
"self": 463.505490157695
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3740.2063044640236,
"count": 333456,
"self": 37.917903069755994,
"children": {
"process_trajectory": {
"total": 703.8874651051883,
"count": 333456,
"self": 702.7401225632348,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1473425419535488,
"count": 10,
"self": 1.1473425419535488
}
}
},
"_update_policy": {
"total": 2998.4009362890793,
"count": 237,
"self": 453.46624880400486,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2544.9346874850744,
"count": 7110,
"self": 2544.9346874850744
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.33995558321476e-07,
"count": 1,
"self": 3.33995558321476e-07
},
"TrainerController._save_models": {
"total": 0.10229324997635558,
"count": 1,
"self": 0.0019287089817225933,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10036454099463299,
"count": 1,
"self": 0.10036454099463299
}
}
}
}
}
}
}