{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.9120617508888245,
"min": 0.9120617508888245,
"max": 1.464768886566162,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 27288.88671875,
"min": 27288.88671875,
"max": 44435.23046875,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479896.0,
"min": 29932.0,
"max": 479896.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479896.0,
"min": 29932.0,
"max": 479896.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0794866755604744,
"min": -0.09307115525007248,
"max": 0.0794866755604744,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 19.633209228515625,
"min": -22.52322006225586,
"max": 19.633209228515625,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.1525806188583374,
"min": -0.1525806188583374,
"max": 0.30635830760002136,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -37.68741226196289,
"min": -37.68741226196289,
"max": 73.83235168457031,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06788035233505722,
"min": 0.06482858696982788,
"max": 0.07256317303650647,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9503249326908012,
"min": 0.5507658067308973,
"max": 1.0768643986375535,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014639852689273824,
"min": 0.0013250867306813482,
"max": 0.014639852689273824,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20495793764983353,
"min": 0.01617370724730821,
"max": 0.20495793764983353,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.0696107387042858e-05,
"min": 2.0696107387042858e-05,
"max": 0.0002896845034384999,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0002897455034186,
"min": 0.0002897455034186,
"max": 0.0030857420714193997,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10689867142857143,
"min": 0.10689867142857143,
"max": 0.19656150000000003,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4965814,
"min": 1.4965814,
"max": 2.4221996,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0006991772757142858,
"min": 0.0006991772757142858,
"max": 0.009656493849999998,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00978848186,
"min": 0.00978848186,
"max": 0.10288520194,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.02121802046895027,
"min": 0.02121802046895027,
"max": 0.47465378046035767,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2970522940158844,
"min": 0.2970522940158844,
"max": 3.7972302436828613,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 768.1538461538462,
"min": 768.1538461538462,
"max": 997.03125,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29958.0,
"min": 16347.0,
"max": 32222.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.40857430757620394,
"min": -0.9354500502813607,
"max": 0.40857430757620394,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 15.934397995471954,
"min": -29.934401609003544,
"max": 15.934397995471954,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.40857430757620394,
"min": -0.9354500502813607,
"max": 0.40857430757620394,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 15.934397995471954,
"min": -29.934401609003544,
"max": 15.934397995471954,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.1693210072684078,
"min": 0.1693210072684078,
"max": 9.300809071344489,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 6.603519283467904,
"min": 6.603519283467904,
"max": 158.1137542128563,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697783131",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1697784137"
},
"total": 1006.3937609530003,
"count": 1,
"self": 0.4881499310004074,
"children": {
"run_training.setup": {
"total": 0.04273510500001976,
"count": 1,
"self": 0.04273510500001976
},
"TrainerController.start_learning": {
"total": 1005.8628759169999,
"count": 1,
"self": 0.5977895260116384,
"children": {
"TrainerController._reset_env": {
"total": 3.584340658999963,
"count": 1,
"self": 3.584340658999963
},
"TrainerController.advance": {
"total": 1001.6061616179882,
"count": 31629,
"self": 0.6487785780318518,
"children": {
"env_step": {
"total": 691.1402908209716,
"count": 31629,
"self": 627.847409748973,
"children": {
"SubprocessEnvManager._take_step": {
"total": 62.9301241919984,
"count": 31629,
"self": 2.2358143350097635,
"children": {
"TorchPolicy.evaluate": {
"total": 60.69430985698864,
"count": 31301,
"self": 60.69430985698864
}
}
},
"workers": {
"total": 0.36275688000023365,
"count": 31629,
"self": 0.0,
"children": {
"worker_root": {
"total": 1003.7252299650388,
"count": 31629,
"is_parallel": true,
"self": 429.3982700500178,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001975033999997322,
"count": 1,
"is_parallel": true,
"self": 0.0006030470001405774,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013719869998567447,
"count": 8,
"is_parallel": true,
"self": 0.0013719869998567447
}
}
},
"UnityEnvironment.step": {
"total": 0.0511135839999497,
"count": 1,
"is_parallel": true,
"self": 0.0005747039999732806,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000496862999852965,
"count": 1,
"is_parallel": true,
"self": 0.000496862999852965
},
"communicator.exchange": {
"total": 0.048288321999962136,
"count": 1,
"is_parallel": true,
"self": 0.048288321999962136
},
"steps_from_proto": {
"total": 0.001753695000161315,
"count": 1,
"is_parallel": true,
"self": 0.00039873100013210205,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001354964000029213,
"count": 8,
"is_parallel": true,
"self": 0.001354964000029213
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 574.326959915021,
"count": 31628,
"is_parallel": true,
"self": 16.997994626078253,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.886607987981051,
"count": 31628,
"is_parallel": true,
"self": 11.886607987981051
},
"communicator.exchange": {
"total": 497.613680516979,
"count": 31628,
"is_parallel": true,
"self": 497.613680516979
},
"steps_from_proto": {
"total": 47.8286767839827,
"count": 31628,
"is_parallel": true,
"self": 9.357476240068081,
"children": {
"_process_rank_one_or_two_observation": {
"total": 38.47120054391462,
"count": 253024,
"is_parallel": true,
"self": 38.47120054391462
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 309.8170922189847,
"count": 31629,
"self": 1.126637351983618,
"children": {
"process_trajectory": {
"total": 58.54621388800206,
"count": 31629,
"self": 58.45991850400196,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08629538400009551,
"count": 1,
"self": 0.08629538400009551
}
}
},
"_update_policy": {
"total": 250.14424097899905,
"count": 220,
"self": 149.44919185796903,
"children": {
"TorchPPOOptimizer.update": {
"total": 100.69504912103002,
"count": 11397,
"self": 100.69504912103002
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.609999895270448e-07,
"count": 1,
"self": 8.609999895270448e-07
},
"TrainerController._save_models": {
"total": 0.07458325300012802,
"count": 1,
"self": 0.0012606509999386617,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07332260200018936,
"count": 1,
"self": 0.07332260200018936
}
}
}
}
}
}
}