First Commit · 8da5e9c
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.29890257120132446,
"min": 0.2798689603805542,
"max": 0.690678060054779,
"count": 34
},
"Pyramids.Policy.Entropy.sum": {
"value": 8947.947265625,
"min": 6837.51416015625,
"max": 20687.189453125,
"count": 34
},
"Pyramids.Step.mean": {
"value": 1499928.0,
"min": 509984.0,
"max": 1499928.0,
"count": 34
},
"Pyramids.Step.sum": {
"value": 1499928.0,
"min": 509984.0,
"max": 1499928.0,
"count": 34
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6424160599708557,
"min": -0.07951540499925613,
"max": 0.7126874327659607,
"count": 34
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 183.0885772705078,
"min": -19.163211822509766,
"max": 209.5301055908203,
"count": 34
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.006785540841519833,
"min": -0.012906786054372787,
"max": 0.03882264718413353,
"count": 34
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.9338791370391846,
"min": -3.639713764190674,
"max": 10.443291664123535,
"count": 34
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 300.40776699029124,
"min": 158.0,
"max": 972.09375,
"count": 34
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30942.0,
"min": 474.0,
"max": 33200.0,
"count": 34
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.680166977267821,
"min": -0.7852750485762954,
"max": 1.8419999827941258,
"count": 34
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 173.05719865858555,
"min": -25.128801554441452,
"max": 195.44419772922993,
"count": 34
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.680166977267821,
"min": -0.7852750485762954,
"max": 1.8419999827941258,
"count": 34
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 173.05719865858555,
"min": -25.128801554441452,
"max": 195.44419772922993,
"count": 34
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.031643046891880196,
"min": 0.029883607279324473,
"max": 0.17481861807382068,
"count": 34
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2592338298636605,
"min": 0.14030545111745596,
"max": 6.817926104879007,
"count": 34
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06658352132720667,
"min": 0.06625723796691198,
"max": 0.07332178857824248,
"count": 34
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9321692985808934,
"min": 0.21996536573472744,
"max": 1.0763413148992649,
"count": 34
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01617559841306553,
"min": 0.0017751062543497832,
"max": 0.01617559841306553,
"count": 34
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2264583777829174,
"min": 0.0211898731298182,
"max": 0.23511970965288737,
"count": 34
},
"Pyramids.Policy.LearningRate.mean": {
"value": 3.1718132284761884e-06,
"min": 3.1718132284761884e-06,
"max": 0.0001991639002787111,
"count": 34
},
"Pyramids.Policy.LearningRate.sum": {
"value": 4.440538519866664e-05,
"min": 4.440538519866664e-05,
"max": 0.0027301714899430002,
"count": 34
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10105723809523813,
"min": 0.10105723809523813,
"max": 0.16638795555555558,
"count": 34
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4148013333333338,
"min": 0.49916386666666673,
"max": 2.3100570000000005,
"count": 34
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00011561808571428563,
"min": 0.00011561808571428563,
"max": 0.00664215676,
"count": 34
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0016186531999999988,
"min": 0.0016186531999999988,
"max": 0.0910546943,
"count": 34
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010202839970588684,
"min": 0.010060305707156658,
"max": 0.020084787160158157,
"count": 34
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14283975958824158,
"min": 0.05922834575176239,
"max": 0.2811870276927948,
"count": 34
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 34
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 34
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679882068",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679885401"
},
"total": 3332.302809297,
"count": 1,
"self": 0.5953055580002911,
"children": {
"run_training.setup": {
"total": 0.13212189299974852,
"count": 1,
"self": 0.13212189299974852
},
"TrainerController.start_learning": {
"total": 3331.575381846,
"count": 1,
"self": 2.333438276002198,
"children": {
"TrainerController._reset_env": {
"total": 5.435023740999895,
"count": 1,
"self": 5.435023740999895
},
"TrainerController.advance": {
"total": 3323.6996491129976,
"count": 64337,
"self": 2.5265376661873233,
"children": {
"env_step": {
"total": 2247.403280915897,
"count": 64337,
"self": 2114.6026447800486,
"children": {
"SubprocessEnvManager._take_step": {
"total": 131.37163090778722,
"count": 64337,
"self": 6.562056770887466,
"children": {
"TorchPolicy.evaluate": {
"total": 124.80957413689976,
"count": 62549,
"self": 124.80957413689976
}
}
},
"workers": {
"total": 1.4290052280612144,
"count": 64337,
"self": 0.0,
"children": {
"worker_root": {
"total": 3324.3106096400124,
"count": 64337,
"is_parallel": true,
"self": 1379.4548536519178,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003024829999958456,
"count": 1,
"is_parallel": true,
"self": 0.0008231780002461164,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022016519997123396,
"count": 8,
"is_parallel": true,
"self": 0.0022016519997123396
}
}
},
"UnityEnvironment.step": {
"total": 0.09306360099981248,
"count": 1,
"is_parallel": true,
"self": 0.0006208509994394262,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005132300002514967,
"count": 1,
"is_parallel": true,
"self": 0.0005132300002514967
},
"communicator.exchange": {
"total": 0.08995682099975966,
"count": 1,
"is_parallel": true,
"self": 0.08995682099975966
},
"steps_from_proto": {
"total": 0.0019726990003618994,
"count": 1,
"is_parallel": true,
"self": 0.00044938199971511494,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015233170006467844,
"count": 8,
"is_parallel": true,
"self": 0.0015233170006467844
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1944.8557559880946,
"count": 64336,
"is_parallel": true,
"self": 42.58033807901211,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.081649256087076,
"count": 64336,
"is_parallel": true,
"self": 26.081649256087076
},
"communicator.exchange": {
"total": 1750.0566777308204,
"count": 64336,
"is_parallel": true,
"self": 1750.0566777308204
},
"steps_from_proto": {
"total": 126.13709092217505,
"count": 64336,
"is_parallel": true,
"self": 28.810998218191344,
"children": {
"_process_rank_one_or_two_observation": {
"total": 97.3260927039837,
"count": 514688,
"is_parallel": true,
"self": 97.3260927039837
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1073.7698305309132,
"count": 64337,
"self": 4.693636518783933,
"children": {
"process_trajectory": {
"total": 151.88804837512998,
"count": 64337,
"self": 151.66285565312955,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22519272200042906,
"count": 2,
"self": 0.22519272200042906
}
}
},
"_update_policy": {
"total": 917.1881456369993,
"count": 470,
"self": 393.269350020998,
"children": {
"TorchPPOOptimizer.update": {
"total": 523.9187956160013,
"count": 22740,
"self": 523.9187956160013
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1020001693395898e-06,
"count": 1,
"self": 1.1020001693395898e-06
},
"TrainerController._save_models": {
"total": 0.10726961399996071,
"count": 1,
"self": 0.0024033519985096063,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1048662620014511,
"count": 1,
"self": 0.1048662620014511
}
}
}
}
}
}
}
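As a minimal, illustrative sketch (not part of the training run itself), the gauges and the nested timer tree above can be inspected with plain Python and the standard library; the local file name "timers.json" is an assumption about where this JSON is saved.

import json

# Load the JSON shown above; "timers.json" is an assumed local file name.
with open("timers.json") as f:
    root = json.load(f)

# Each gauge records value/min/max/count for one ML-Agents statistic.
for name, gauge in root["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# Timer records nest via "children"; walking the tree shows where the
# roughly 3332 s of wall-clock time reported under "total" was spent.
def walk(node, label="root", depth=0):
    print(f"{'  ' * depth}{label}: "
          f"{node.get('total', 0.0):.1f}s over {node.get('count', 0)} call(s)")
    for child_label, child in node.get("children", {}).items():
        walk(child, child_label, depth + 1)

walk(root)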