{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2850606441497803,
"min": 0.2850606441497803,
"max": 1.4549331665039062,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8570.0634765625,
"min": 8570.0634765625,
"max": 44136.8515625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989960.0,
"min": 29936.0,
"max": 989960.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989960.0,
"min": 29936.0,
"max": 989960.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6157785654067993,
"min": -0.2300759255886078,
"max": 0.6157785654067993,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 171.80221557617188,
"min": -54.75807189941406,
"max": 171.80221557617188,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.006526252720504999,
"min": -0.0025858799926936626,
"max": 0.5017464756965637,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.8208245038986206,
"min": -0.7343899011611938,
"max": 119.41566467285156,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06751514978426233,
"min": 0.06473264137706237,
"max": 0.07424198217387411,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9452120969796726,
"min": 0.4879827787286747,
"max": 1.0570276620633463,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013204971776817209,
"min": 0.0010211280794210785,
"max": 0.01827282625279395,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18486960487544093,
"min": 0.01327466503247402,
"max": 0.2265255921702905,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.380633254107147e-06,
"min": 7.380633254107147e-06,
"max": 0.0002952336015888,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010332886555750006,
"min": 0.00010332886555750006,
"max": 0.0036348160883947,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246017857142857,
"min": 0.10246017857142857,
"max": 0.19841119999999998,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4344425,
"min": 1.3888783999999998,
"max": 2.611605300000001,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002557718392857145,
"min": 0.0002557718392857145,
"max": 0.009841278879999998,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003580805750000003,
"min": 0.003580805750000003,
"max": 0.12117936947,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014204681850969791,
"min": 0.014204681850969791,
"max": 0.6670668721199036,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19886554777622223,
"min": 0.19886554777622223,
"max": 4.669467926025391,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 323.97777777777776,
"min": 318.88659793814435,
"max": 994.1290322580645,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29158.0,
"min": 17118.0,
"max": 32921.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6512898628631334,
"min": -0.8659677964545065,
"max": 1.6604886503563714,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 146.96479779481888,
"min": -26.845001690089703,
"max": 161.06739908456802,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6512898628631334,
"min": -0.8659677964545065,
"max": 1.6604886503563714,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 146.96479779481888,
"min": -26.845001690089703,
"max": 161.06739908456802,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.047866596891335475,
"min": 0.047866596891335475,
"max": 13.375689694450962,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.260127123328857,
"min": 4.260127123328857,
"max": 240.7624145001173,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679700791",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679703095"
},
"total": 2304.013763954,
"count": 1,
"self": 0.8000883119998434,
"children": {
"run_training.setup": {
"total": 0.10927096400018854,
"count": 1,
"self": 0.10927096400018854
},
"TrainerController.start_learning": {
"total": 2303.104404678,
"count": 1,
"self": 1.6061935609732245,
"children": {
"TrainerController._reset_env": {
"total": 7.191205973000024,
"count": 1,
"self": 7.191205973000024
},
"TrainerController.advance": {
"total": 2294.1603502940266,
"count": 63936,
"self": 1.708674820024953,
"children": {
"env_step": {
"total": 1633.6623783440214,
"count": 63936,
"self": 1511.3710944750628,
"children": {
"SubprocessEnvManager._take_step": {
"total": 121.3093568119516,
"count": 63936,
"self": 5.279877075915238,
"children": {
"TorchPolicy.evaluate": {
"total": 116.02947973603636,
"count": 62568,
"self": 116.02947973603636
}
}
},
"workers": {
"total": 0.981927057006942,
"count": 63936,
"self": 0.0,
"children": {
"worker_root": {
"total": 2297.1041579629805,
"count": 63936,
"is_parallel": true,
"self": 914.7469910429681,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002565086999993582,
"count": 1,
"is_parallel": true,
"self": 0.0008018669996090466,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017632200003845355,
"count": 8,
"is_parallel": true,
"self": 0.0017632200003845355
}
}
},
"UnityEnvironment.step": {
"total": 0.047035330999960934,
"count": 1,
"is_parallel": true,
"self": 0.0005415480000010575,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005027439999594208,
"count": 1,
"is_parallel": true,
"self": 0.0005027439999594208
},
"communicator.exchange": {
"total": 0.04432649299997138,
"count": 1,
"is_parallel": true,
"self": 0.04432649299997138
},
"steps_from_proto": {
"total": 0.0016645460000290768,
"count": 1,
"is_parallel": true,
"self": 0.0003791359997649124,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012854100002641644,
"count": 8,
"is_parallel": true,
"self": 0.0012854100002641644
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1382.3571669200123,
"count": 63935,
"is_parallel": true,
"self": 31.959368816977076,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.360976764018687,
"count": 63935,
"is_parallel": true,
"self": 24.360976764018687
},
"communicator.exchange": {
"total": 1226.9404408910277,
"count": 63935,
"is_parallel": true,
"self": 1226.9404408910277
},
"steps_from_proto": {
"total": 99.09638044798885,
"count": 63935,
"is_parallel": true,
"self": 21.68372778399089,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.41265266399796,
"count": 511480,
"is_parallel": true,
"self": 77.41265266399796
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 658.78929712998,
"count": 63936,
"self": 2.9694343249700523,
"children": {
"process_trajectory": {
"total": 126.93954239100731,
"count": 63936,
"self": 126.68060574600713,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2589366450001762,
"count": 2,
"self": 0.2589366450001762
}
}
},
"_update_policy": {
"total": 528.8803204140027,
"count": 460,
"self": 338.208335945983,
"children": {
"TorchPPOOptimizer.update": {
"total": 190.67198446801967,
"count": 22788,
"self": 190.67198446801967
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5380001059384085e-06,
"count": 1,
"self": 1.5380001059384085e-06
},
"TrainerController._save_models": {
"total": 0.14665331200012588,
"count": 1,
"self": 0.0018620229998305149,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14479128900029536,
"count": 1,
"self": 0.14479128900029536
}
}
}
}
}
}
}