First Push PyramidsRND1 (commit dbf2561)
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.027806609869003296,
"min": 0.027806609869003296,
"max": 1.3808926343917847,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 815.0673217773438,
"min": 815.0673217773438,
"max": 41890.7578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989979.0,
"min": 29952.0,
"max": 989979.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989979.0,
"min": 29952.0,
"max": 989979.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.10001560300588608,
"min": -0.13494527339935303,
"max": -0.06219607591629028,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -23.803712844848633,
"min": -32.521812438964844,
"max": -14.740469932556152,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.9446463584899902,
"min": 0.528832197189331,
"max": 0.9996211528778076,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 224.82583618164062,
"min": 125.33323669433594,
"max": 240.90869140625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06918402195000485,
"min": 0.06489209266740485,
"max": 0.07343401221483731,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.899392285350063,
"min": 0.48863747097744503,
"max": 0.9529169728450569,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0005399994343846319,
"min": 0.0005399994343846319,
"max": 0.01735943611402185,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.007019992647000214,
"min": 0.007019992647000214,
"max": 0.12151605279815295,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.57415132146923e-06,
"min": 7.57415132146923e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 9.846396717909999e-05,
"min": 9.846396717909999e-05,
"max": 0.0031186267604577994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252468461538464,
"min": 0.10252468461538464,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.3328209000000002,
"min": 1.2664556000000002,
"max": 2.2395422000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002622159930769231,
"min": 0.0002622159930769231,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0034088079100000005,
"min": 0.0034088079100000005,
"max": 0.10397026577999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.9381809234619141,
"min": 0.6159754395484924,
"max": 0.9708390235900879,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 12.196352005004883,
"min": 4.311828136444092,
"max": 12.593680381774902,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 959.969696969697,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 17982.0,
"min": 15984.0,
"max": 31968.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9998556073341105,
"min": -1.0000000521540642,
"max": -0.8396424718878486,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -17.99740093201399,
"min": -32.000001668930054,
"max": -16.000000834465027,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9998556073341105,
"min": -1.0000000521540642,
"max": -0.8396424718878486,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -17.99740093201399,
"min": -32.000001668930054,
"max": -16.000000834465027,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 9.13037273950047,
"min": 5.966575846076012,
"max": 15.1412807777524,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 164.34670931100845,
"min": 160.29252952337265,
"max": 312.6324376165867,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680714981",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680716716"
},
"total": 1734.773392021,
"count": 1,
"self": 0.8373344849997011,
"children": {
"run_training.setup": {
"total": 0.11754184800020084,
"count": 1,
"self": 0.11754184800020084
},
"TrainerController.start_learning": {
"total": 1733.8185156880002,
"count": 1,
"self": 1.2678085060435933,
"children": {
"TrainerController._reset_env": {
"total": 3.701861646999987,
"count": 1,
"self": 3.701861646999987
},
"TrainerController.advance": {
"total": 1728.6925517469567,
"count": 62718,
"self": 1.3867685879854434,
"children": {
"env_step": {
"total": 1127.0125640460349,
"count": 62718,
"self": 1025.3004730838973,
"children": {
"SubprocessEnvManager._take_step": {
"total": 100.93533293406335,
"count": 62718,
"self": 4.503627153050047,
"children": {
"TorchPolicy.evaluate": {
"total": 96.4317057810133,
"count": 62512,
"self": 96.4317057810133
}
}
},
"workers": {
"total": 0.776758028074255,
"count": 62718,
"self": 0.0,
"children": {
"worker_root": {
"total": 1729.3952962420315,
"count": 62718,
"is_parallel": true,
"self": 806.2543269750217,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017321499999525258,
"count": 1,
"is_parallel": true,
"self": 0.0005374050003865705,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011947449995659554,
"count": 8,
"is_parallel": true,
"self": 0.0011947449995659554
}
}
},
"UnityEnvironment.step": {
"total": 0.06785332399999788,
"count": 1,
"is_parallel": true,
"self": 0.0007142419997308025,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048166300007324026,
"count": 1,
"is_parallel": true,
"self": 0.00048166300007324026
},
"communicator.exchange": {
"total": 0.06505187500010834,
"count": 1,
"is_parallel": true,
"self": 0.06505187500010834
},
"steps_from_proto": {
"total": 0.0016055440000855015,
"count": 1,
"is_parallel": true,
"self": 0.00035622199970930524,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012493220003761962,
"count": 8,
"is_parallel": true,
"self": 0.0012493220003761962
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 923.1409692670097,
"count": 62717,
"is_parallel": true,
"self": 31.201174280058012,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.22014116797891,
"count": 62717,
"is_parallel": true,
"self": 22.22014116797891
},
"communicator.exchange": {
"total": 780.0142519440283,
"count": 62717,
"is_parallel": true,
"self": 780.0142519440283
},
"steps_from_proto": {
"total": 89.70540187494453,
"count": 62717,
"is_parallel": true,
"self": 18.768548982078528,
"children": {
"_process_rank_one_or_two_observation": {
"total": 70.936852892866,
"count": 501736,
"is_parallel": true,
"self": 70.936852892866
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 600.2932191129364,
"count": 62718,
"self": 1.839694490954571,
"children": {
"process_trajectory": {
"total": 98.43563496998536,
"count": 62718,
"self": 98.12921774698566,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3064172229996984,
"count": 2,
"self": 0.3064172229996984
}
}
},
"_update_policy": {
"total": 500.0178896519965,
"count": 393,
"self": 318.95783980397596,
"children": {
"TorchPPOOptimizer.update": {
"total": 181.06004984802053,
"count": 23052,
"self": 181.06004984802053
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.419000000169035e-06,
"count": 1,
"self": 1.419000000169035e-06
},
"TrainerController._save_models": {
"total": 0.156292368999857,
"count": 1,
"self": 0.0021750199994130526,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15411734900044394,
"count": 1,
"self": 0.15411734900044394
}
}
}
}
}
}
}
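The JSON above is the end-of-run report that mlagents-learn produces: "gauges" holds per-metric summaries (latest value, min, max, and number of samples over the run), and the nested "children" entries form a timer tree that breaks the total wall-clock time down by call site. Below is a minimal inspection sketch, assuming the object is saved at run_logs/timers.json (the path and the walk helper are illustrative assumptions, not part of the ML-Agents API):

# Hypothetical inspection script; assumes the JSON above was saved to
# run_logs/timers.json, the conventional mlagents-learn output location.
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the latest value plus its min/max/count over the run.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# The timer tree nests callees under "children"; walking it shows where the
# ~1734.8 s of wall-clock time went (here communicator.exchange dominates env_step).
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s "
          f"(self {node.get('self', 0.0):.1f}s, count {node.get('count', 0)})")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)

Read against the gauges above, the RND reward mean ended around 9.13 while the extrinsic/cumulative reward mean stayed near -1.0, i.e. after roughly one million steps exploration was still driven almost entirely by the RND bonus and the agent had not yet earned positive extrinsic reward on Pyramids.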