PyramidsRND-v0/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.45589110255241394,
"min": 0.4546535909175873,
"max": 1.3663827180862427,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13749.67578125,
"min": 13697.8037109375,
"max": 41450.5859375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989954.0,
"min": 29952.0,
"max": 989954.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989954.0,
"min": 29952.0,
"max": 989954.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6129601001739502,
"min": -0.11014147847890854,
"max": 0.6129601001739502,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 173.46771240234375,
"min": -26.4339542388916,
"max": 173.46771240234375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.9485320448875427,
"min": -0.2020924985408783,
"max": 0.9485320448875427,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 268.4345703125,
"min": -55.97962188720703,
"max": 268.4345703125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06755510861046236,
"min": 0.06502007260791115,
"max": 0.07625880925860698,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9457715205464731,
"min": 0.5338116648102489,
"max": 1.044212316238212,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.05214853348047081,
"min": 0.00038246605056387864,
"max": 0.06962171456516011,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.7300794687265914,
"min": 0.004207126556202665,
"max": 0.9747040039122417,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.428997523699993e-06,
"min": 7.428997523699993e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001040059653317999,
"min": 0.0001040059653317999,
"max": 0.0035073839308721,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247629999999999,
"min": 0.10247629999999999,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346682,
"min": 1.3886848,
"max": 2.5691279,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025738236999999983,
"min": 0.00025738236999999983,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036033531799999977,
"min": 0.0036033531799999977,
"max": 0.11693587721000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007939695380628109,
"min": 0.007939695380628109,
"max": 0.4354114532470703,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11115573346614838,
"min": 0.11115573346614838,
"max": 3.047880172729492,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 313.09574468085106,
"min": 311.94897959183675,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29431.0,
"min": 15984.0,
"max": 33418.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6443340232239125,
"min": -1.0000000521540642,
"max": 1.6633878600115728,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 154.56739818304777,
"min": -31.998001664876938,
"max": 164.6753981411457,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6443340232239125,
"min": -1.0000000521540642,
"max": 1.6633878600115728,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 154.56739818304777,
"min": -31.998001664876938,
"max": 164.6753981411457,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.025704330112506446,
"min": 0.025704330112506446,
"max": 8.730658059939742,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.416207030575606,
"min": 2.416207030575606,
"max": 139.69052895903587,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677594607",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677596950"
},
"total": 2342.813608613,
"count": 1,
"self": 1.1229354970000713,
"children": {
"run_training.setup": {
"total": 0.137566391999826,
"count": 1,
"self": 0.137566391999826
},
"TrainerController.start_learning": {
"total": 2341.553106724,
"count": 1,
"self": 1.4631121429702034,
"children": {
"TrainerController._reset_env": {
"total": 7.320026914000209,
"count": 1,
"self": 7.320026914000209
},
"TrainerController.advance": {
"total": 2332.627165118029,
"count": 63810,
"self": 1.5276263981108968,
"children": {
"env_step": {
"total": 1566.1835136969935,
"count": 63810,
"self": 1449.6049013371367,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.70841910792115,
"count": 63810,
"self": 4.816623619022721,
"children": {
"TorchPolicy.evaluate": {
"total": 110.89179548889842,
"count": 62561,
"self": 37.55098113291069,
"children": {
"TorchPolicy.sample_actions": {
"total": 73.34081435598773,
"count": 62561,
"self": 73.34081435598773
}
}
}
}
},
"workers": {
"total": 0.8701932519356887,
"count": 63810,
"self": 0.0,
"children": {
"worker_root": {
"total": 2336.06547467807,
"count": 63810,
"is_parallel": true,
"self": 1005.7558127040616,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017989749999287596,
"count": 1,
"is_parallel": true,
"self": 0.0006253250003283028,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011736499996004568,
"count": 8,
"is_parallel": true,
"self": 0.0011736499996004568
}
}
},
"UnityEnvironment.step": {
"total": 0.045789749000050506,
"count": 1,
"is_parallel": true,
"self": 0.000517260000378883,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047320500016212463,
"count": 1,
"is_parallel": true,
"self": 0.00047320500016212463
},
"communicator.exchange": {
"total": 0.04319263899969883,
"count": 1,
"is_parallel": true,
"self": 0.04319263899969883
},
"steps_from_proto": {
"total": 0.001606644999810669,
"count": 1,
"is_parallel": true,
"self": 0.00041850699972201255,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011881380000886566,
"count": 8,
"is_parallel": true,
"self": 0.0011881380000886566
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1330.3096619740086,
"count": 63809,
"is_parallel": true,
"self": 32.68317430000252,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.76630415803038,
"count": 63809,
"is_parallel": true,
"self": 23.76630415803038
},
"communicator.exchange": {
"total": 1178.4587415410283,
"count": 63809,
"is_parallel": true,
"self": 1178.4587415410283
},
"steps_from_proto": {
"total": 95.40144197494737,
"count": 63809,
"is_parallel": true,
"self": 22.944359807030196,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.45708216791718,
"count": 510472,
"is_parallel": true,
"self": 72.45708216791718
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 764.9160250229247,
"count": 63810,
"self": 2.672408037139121,
"children": {
"process_trajectory": {
"total": 167.17505226978892,
"count": 63810,
"self": 166.9400556227888,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23499664700011635,
"count": 2,
"self": 0.23499664700011635
}
}
},
"_update_policy": {
"total": 595.0685647159967,
"count": 452,
"self": 232.64852356495476,
"children": {
"TorchPPOOptimizer.update": {
"total": 362.4200411510419,
"count": 22824,
"self": 362.4200411510419
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.410005077370442e-07,
"count": 1,
"self": 9.410005077370442e-07
},
"TrainerController._save_models": {
"total": 0.14280160799989972,
"count": 1,
"self": 0.0020561580004141433,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14074544999948557,
"count": 1,
"self": 0.14074544999948557
}
}
}
}
}
}
}
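
Below is a minimal sketch of how this file could be inspected offline. It only relies on keys that appear above ("gauges" entries with value/min/max/count, and timer nodes with total/self/count/children); the local path is an assumption, not part of the original run.

import json

# Assumed local path to the file shown above; adjust as needed.
with open("run_logs/timers.json") as f:
    root = json.load(f)

# Gauges are flat {name: {value, min, max, count}} summaries of training stats.
for name, g in sorted(root["gauges"].items()):
    print(f"{name:55s} last={g['value']:.4f} min={g['min']:.4f} max={g['max']:.4f}")

# Timer blocks form a tree: each node records total and self wall-clock seconds,
# a call count, and optional children. Walk it to see where training time went.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: total={node['total']:.1f}s "
          f"self={node['self']:.1f}s count={node['count']}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(root)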