{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5157133340835571,
"min": 1.2935482263565063,
"max": 3.2956314086914062,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 31720.849609375,
"min": 20734.28515625,
"max": 131043.8984375,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 91.54545454545455,
"min": 39.274193548387096,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20140.0,
"min": 16360.0,
"max": 23756.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1697.239423038248,
"min": 1197.3130565395577,
"max": 1750.4612546170524,
"count": 4981
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 186696.33653420728,
"min": 2394.6261130791154,
"max": 408770.339808854,
"count": 4981
},
"SoccerTwos.Step.mean": {
"value": 49999932.0,
"min": 9864.0,
"max": 49999932.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999932.0,
"min": 9864.0,
"max": 49999932.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.06489232927560806,
"min": -0.13828794658184052,
"max": 0.183258056640625,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -7.138156414031982,
"min": -27.519302368164062,
"max": 27.974868774414062,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.06335005164146423,
"min": -0.13755041360855103,
"max": 0.1855001300573349,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -6.968505382537842,
"min": -27.37253189086914,
"max": 27.471893310546875,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.13321090719916603,
"min": -0.48762353027568145,
"max": 0.5933270269149059,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -14.653199791908264,
"min": -64.33020031452179,
"max": 65.97339940071106,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.13321090719916603,
"min": -0.48762353027568145,
"max": 0.5933270269149059,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -14.653199791908264,
"min": -64.33020031452179,
"max": 65.97339940071106,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017387662552452336,
"min": 0.008537781741082048,
"max": 0.026532609233011803,
"count": 2424
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017387662552452336,
"min": 0.008537781741082048,
"max": 0.026532609233011803,
"count": 2424
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08416157389680544,
"min": 2.681433297766489e-05,
"max": 0.13336161126693089,
"count": 2424
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08416157389680544,
"min": 2.681433297766489e-05,
"max": 0.13336161126693089,
"count": 2424
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08481311574578285,
"min": 2.4318247657599083e-05,
"max": 0.1360989545782407,
"count": 2424
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08481311574578285,
"min": 2.4318247657599083e-05,
"max": 0.1360989545782407,
"count": 2424
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2424
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2424
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2424
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2424
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2424
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2424
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675271541",
"python_version": "3.10.6 (main, Nov 14 2022, 16:10:14) [GCC 11.3.0]",
"command_line_arguments": "/home/ryan/hf-rl-u7/.venv/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTows.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1675515948"
},
"total": 244406.351708607,
"count": 1,
"self": 0.6412395380320959,
"children": {
"run_training.setup": {
"total": 0.024723760987399146,
"count": 1,
"self": 0.024723760987399146
},
"TrainerController.start_learning": {
"total": 244405.68574530797,
"count": 1,
"self": 108.2894850797602,
"children": {
"TrainerController._reset_env": {
"total": 7.761083504301496,
"count": 250,
"self": 7.761083504301496
},
"TrainerController.advance": {
"total": 244289.24782776795,
"count": 3442079,
"self": 116.4954589283443,
"children": {
"env_step": {
"total": 84596.5759572321,
"count": 3442079,
"self": 69675.5826231119,
"children": {
"SubprocessEnvManager._take_step": {
"total": 14858.47143942205,
"count": 3442079,
"self": 694.5696085291856,
"children": {
"TorchPolicy.evaluate": {
"total": 14163.901830892864,
"count": 6279822,
"self": 14163.901830892864
}
}
},
"workers": {
"total": 62.52189469814766,
"count": 3442079,
"self": 0.0,
"children": {
"worker_root": {
"total": 244215.29263141213,
"count": 3442079,
"is_parallel": true,
"self": 186735.3678908844,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025864450144581497,
"count": 2,
"is_parallel": true,
"self": 0.0006178579933475703,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019685870211105794,
"count": 8,
"is_parallel": true,
"self": 0.0019685870211105794
}
}
},
"UnityEnvironment.step": {
"total": 0.032234045007498935,
"count": 1,
"is_parallel": true,
"self": 0.000590939016547054,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006142240017652512,
"count": 1,
"is_parallel": true,
"self": 0.0006142240017652512
},
"communicator.exchange": {
"total": 0.02902569100842811,
"count": 1,
"is_parallel": true,
"self": 0.02902569100842811
},
"steps_from_proto": {
"total": 0.002003190980758518,
"count": 2,
"is_parallel": true,
"self": 0.0004106759442947805,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015925150364637375,
"count": 8,
"is_parallel": true,
"self": 0.0015925150364637375
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 57479.26351238892,
"count": 3442078,
"is_parallel": true,
"self": 3500.0065727462643,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2593.24616320472,
"count": 3442078,
"is_parallel": true,
"self": 2593.24616320472
},
"communicator.exchange": {
"total": 40589.26293149611,
"count": 3442078,
"is_parallel": true,
"self": 40589.26293149611
},
"steps_from_proto": {
"total": 10796.747844941827,
"count": 6884156,
"is_parallel": true,
"self": 2144.1909672385955,
"children": {
"_process_rank_one_or_two_observation": {
"total": 8652.556877703231,
"count": 27536624,
"is_parallel": true,
"self": 8652.556877703231
}
}
}
}
},
"steps_from_proto": {
"total": 0.6612281388079282,
"count": 498,
"is_parallel": true,
"self": 0.13009566432447173,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.5311324744834565,
"count": 1992,
"is_parallel": true,
"self": 0.5311324744834565
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 159576.1764116075,
"count": 3442079,
"self": 722.9643745862704,
"children": {
"process_trajectory": {
"total": 20133.092868809617,
"count": 3442079,
"self": 20099.42255840465,
"children": {
"RLTrainer._checkpoint": {
"total": 33.670310404966585,
"count": 100,
"self": 33.670310404966585
}
}
},
"_update_policy": {
"total": 138720.11916821162,
"count": 2424,
"self": 10030.084023292206,
"children": {
"TorchPOCAOptimizer.update": {
"total": 128690.03514491941,
"count": 72720,
"self": 128690.03514491941
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.74978320300579e-07,
"count": 1,
"self": 9.74978320300579e-07
},
"TrainerController._save_models": {
"total": 0.38734798098448664,
"count": 1,
"self": 0.0024687869590707123,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3848791940254159,
"count": 1,
"self": 0.3848791940254159
}
}
}
}
}
}
}