ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4738803803920746,
"min": 0.4738803803920746,
"max": 1.4607224464416504,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14413.5458984375,
"min": 14120.5009765625,
"max": 44312.4765625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989982.0,
"min": 29940.0,
"max": 989982.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989982.0,
"min": 29940.0,
"max": 989982.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5412266850471497,
"min": -0.13136444985866547,
"max": 0.6165623068809509,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 153.16714477539062,
"min": -31.527467727661133,
"max": 170.7039794921875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02286222204566002,
"min": -0.004147015977650881,
"max": 0.3554820120334625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.470008850097656,
"min": -1.1196943521499634,
"max": 84.60472106933594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06751539292862686,
"min": 0.06560502753236581,
"max": 0.0734383504443341,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.945215501000776,
"min": 0.4794338138087588,
"max": 1.0573242691558942,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015086740736302751,
"min": 0.0001971077924592591,
"max": 0.01669634478873507,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2112143703082385,
"min": 0.002365293509511109,
"max": 0.23374882704229094,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.711061715392855e-06,
"min": 7.711061715392855e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010795486401549996,
"min": 0.00010795486401549996,
"max": 0.0036088143970618987,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257032142857145,
"min": 0.10257032142857145,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4359845000000002,
"min": 1.3886848,
"max": 2.6174056,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026677511071428563,
"min": 0.00026677511071428563,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003734851549999999,
"min": 0.003734851549999999,
"max": 0.12030351618999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009258056990802288,
"min": 0.009258056990802288,
"max": 0.42740482091903687,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12961280345916748,
"min": 0.12961280345916748,
"max": 2.9918336868286133,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 318.3578947368421,
"min": 313.13978494623655,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30244.0,
"min": 17634.0,
"max": 32124.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6395284006470128,
"min": -0.9998875521123409,
"max": 1.6438386901732414,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 155.75519806146622,
"min": -31.99640166759491,
"max": 155.75519806146622,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6395284006470128,
"min": -0.9998875521123409,
"max": 1.6438386901732414,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 155.75519806146622,
"min": -31.99640166759491,
"max": 155.75519806146622,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03104400059306308,
"min": 0.03104400059306308,
"max": 7.738087490200996,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.9491800563409925,
"min": 2.9491800563409925,
"max": 139.28557482361794,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680815077",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680817319"
},
"total": 2242.312401243,
"count": 1,
"self": 0.5862385919999724,
"children": {
"run_training.setup": {
"total": 0.1830759209997268,
"count": 1,
"self": 0.1830759209997268
},
"TrainerController.start_learning": {
"total": 2241.54308673,
"count": 1,
"self": 1.4299370100425222,
"children": {
"TrainerController._reset_env": {
"total": 5.005564408000282,
"count": 1,
"self": 5.005564408000282
},
"TrainerController.advance": {
"total": 2235.012954189958,
"count": 63833,
"self": 1.4810279728708338,
"children": {
"env_step": {
"total": 1611.9200069169974,
"count": 63833,
"self": 1500.423559084994,
"children": {
"SubprocessEnvManager._take_step": {
"total": 110.62071218597794,
"count": 63833,
"self": 4.937781984981484,
"children": {
"TorchPolicy.evaluate": {
"total": 105.68293020099645,
"count": 62560,
"self": 105.68293020099645
}
}
},
"workers": {
"total": 0.8757356460255323,
"count": 63833,
"self": 0.0,
"children": {
"worker_root": {
"total": 2236.30132122501,
"count": 63833,
"is_parallel": true,
"self": 853.1551611910945,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002823130999786372,
"count": 1,
"is_parallel": true,
"self": 0.000895879000381683,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019272519994046888,
"count": 8,
"is_parallel": true,
"self": 0.0019272519994046888
}
}
},
"UnityEnvironment.step": {
"total": 0.04936992500006454,
"count": 1,
"is_parallel": true,
"self": 0.0005599539990726043,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000500226000440307,
"count": 1,
"is_parallel": true,
"self": 0.000500226000440307
},
"communicator.exchange": {
"total": 0.04666081499999564,
"count": 1,
"is_parallel": true,
"self": 0.04666081499999564
},
"steps_from_proto": {
"total": 0.0016489300005559926,
"count": 1,
"is_parallel": true,
"self": 0.0003926430026695016,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001256286997886491,
"count": 8,
"is_parallel": true,
"self": 0.001256286997886491
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1383.1461600339153,
"count": 63832,
"is_parallel": true,
"self": 33.43392795415639,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.96604650007248,
"count": 63832,
"is_parallel": true,
"self": 24.96604650007248
},
"communicator.exchange": {
"total": 1223.3712738188997,
"count": 63832,
"is_parallel": true,
"self": 1223.3712738188997
},
"steps_from_proto": {
"total": 101.37491176078674,
"count": 63832,
"is_parallel": true,
"self": 22.040230308604805,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.33468145218194,
"count": 510656,
"is_parallel": true,
"self": 79.33468145218194
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 621.6119193000895,
"count": 63833,
"self": 2.5367783892424995,
"children": {
"process_trajectory": {
"total": 110.94901360384392,
"count": 63833,
"self": 110.73724940084321,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21176420300071186,
"count": 2,
"self": 0.21176420300071186
}
}
},
"_update_policy": {
"total": 508.1261273070031,
"count": 453,
"self": 325.4129669689728,
"children": {
"TorchPPOOptimizer.update": {
"total": 182.71316033803032,
"count": 22794,
"self": 182.71316033803032
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.489995136391371e-07,
"count": 1,
"self": 9.489995136391371e-07
},
"TrainerController._save_models": {
"total": 0.0946301730000414,
"count": 1,
"self": 0.0014569019995178678,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09317327100052353,
"count": 1,
"self": 0.09317327100052353
}
}
}
}
}
}
}