ppo-Pyramids/run_logs/timers.json
{
    "name": "root",
    "gauges": {
        "Pyramids.Policy.Entropy.mean": {
            "value": 0.9326035380363464,
            "min": 0.8961029052734375,
            "max": 1.5893805027008057,
            "count": 33
        },
        "Pyramids.Policy.Entropy.sum": {
            "value": 27873.654296875,
            "min": 26739.7109375,
            "max": 48215.4453125,
            "count": 33
        },
        "Pyramids.Step.mean": {
            "value": 989972.0,
            "min": 29952.0,
            "max": 989972.0,
            "count": 33
        },
        "Pyramids.Step.sum": {
            "value": 989972.0,
            "min": 29952.0,
            "max": 989972.0,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
            "value": 0.1975192278623581,
            "min": -0.09932968765497208,
            "max": 0.1975192278623581,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
            "value": 50.36740493774414,
            "min": -23.739795684814453,
            "max": 50.36740493774414,
            "count": 33
        },
        "Pyramids.Policy.RndValueEstimate.mean": {
            "value": 0.00669124536216259,
            "min": 0.0025217432994395494,
            "max": 0.4939653277397156,
            "count": 33
        },
        "Pyramids.Policy.RndValueEstimate.sum": {
            "value": 1.7062675952911377,
            "min": 0.6178271174430847,
            "max": 118.55168151855469,
            "count": 33
        },
        "Pyramids.Losses.PolicyLoss.mean": {
            "value": 0.07088001937583427,
            "min": 0.0649382138043506,
            "max": 0.07313593165599741,
            "count": 33
        },
        "Pyramids.Losses.PolicyLoss.sum": {
            "value": 0.2835200775033371,
            "min": 0.1431050010835369,
            "max": 0.2912471030964904,
            "count": 33
        },
        "Pyramids.Losses.ValueLoss.mean": {
            "value": 0.011226112884760836,
            "min": 0.00010854629627919817,
            "max": 0.01829803425423575,
            "count": 33
        },
        "Pyramids.Losses.ValueLoss.sum": {
            "value": 0.04490445153904334,
            "min": 0.0003256388888375945,
            "max": 0.04490445153904334,
            "count": 33
        },
        "Pyramids.Policy.LearningRate.mean": {
            "value": 5.361797319150003e-06,
            "min": 5.361797319150003e-06,
            "max": 0.00019696640151679992,
            "count": 33
        },
        "Pyramids.Policy.LearningRate.sum": {
            "value": 2.1447189276600012e-05,
            "min": 2.1447189276600012e-05,
            "max": 0.0007406106296947001,
            "count": 33
        },
        "Pyramids.Policy.Epsilon.mean": {
            "value": 0.10268085000000002,
            "min": 0.10268085000000002,
            "max": 0.19848320000000003,
            "count": 33
        },
        "Pyramids.Policy.Epsilon.sum": {
            "value": 0.41072340000000007,
            "min": 0.3167341,
            "max": 0.7703053,
            "count": 33
        },
        "Pyramids.Policy.Beta.mean": {
            "value": 0.0002778169150000002,
            "min": 0.0002778169150000002,
            "max": 0.009848471679999998,
            "count": 33
        },
        "Pyramids.Policy.Beta.sum": {
            "value": 0.0011112676600000008,
            "min": 0.0011112676600000008,
            "max": 0.037033499469999995,
            "count": 33
        },
        "Pyramids.Losses.RNDLoss.mean": {
            "value": 0.013002083636820316,
            "min": 0.013002083636820316,
            "max": 0.6207387447357178,
            "count": 33
        },
        "Pyramids.Losses.RNDLoss.sum": {
            "value": 0.052008334547281265,
            "min": 0.03997467830777168,
            "max": 1.2414774894714355,
            "count": 33
        },
        "Pyramids.Environment.EpisodeLength.mean": {
            "value": 605.6666666666666,
            "min": 605.6666666666666,
            "max": 999.0,
            "count": 33
        },
        "Pyramids.Environment.EpisodeLength.sum": {
            "value": 32706.0,
            "min": 15984.0,
            "max": 33288.0,
            "count": 33
        },
        "Pyramids.Environment.CumulativeReward.mean": {
            "value": 1.023837001649318,
            "min": -1.0000000521540642,
            "max": 1.023837001649318,
            "count": 33
        },
        "Pyramids.Environment.CumulativeReward.sum": {
            "value": 55.28719808906317,
            "min": -32.000001668930054,
            "max": 55.28719808906317,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicReward.mean": {
            "value": 1.023837001649318,
            "min": -1.0000000521540642,
            "max": 1.023837001649318,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicReward.sum": {
            "value": 55.28719808906317,
            "min": -32.000001668930054,
            "max": 55.28719808906317,
            "count": 33
        },
        "Pyramids.Policy.RndReward.mean": {
            "value": 0.08042258844620341,
            "min": 0.08042258844620341,
            "max": 16.28409444913268,
            "count": 33
        },
        "Pyramids.Policy.RndReward.sum": {
            "value": 4.342819776094984,
            "min": 4.096232972282451,
            "max": 260.5455111861229,
            "count": 33
        },
        "Pyramids.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 33
        },
        "Pyramids.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 33
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1679312061",
        "python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training 3 --no-graphics",
        "mlagents_version": "0.31.0.dev0",
        "mlagents_envs_version": "0.31.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "1.11.0+cu102",
        "numpy_version": "1.21.2",
        "end_time_seconds": "1679314088"
    },
    "total": 2027.4946909680002,
    "count": 1,
    "self": 0.4753499179996652,
    "children": {
        "run_training.setup": {
            "total": 0.10604798199983634,
            "count": 1,
            "self": 0.10604798199983634
        },
        "TrainerController.start_learning": {
            "total": 2026.9132930680007,
            "count": 1,
            "self": 1.5611527979644961,
            "children": {
                "TrainerController._reset_env": {
                    "total": 6.181209043999843,
                    "count": 1,
                    "self": 6.181209043999843
                },
                "TrainerController.advance": {
                    "total": 2019.071749045036,
                    "count": 63226,
                    "self": 1.672508703094536,
                    "children": {
                        "env_step": {
                            "total": 1400.2798438280306,
                            "count": 63226,
                            "self": 1282.44390394313,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 116.90466251996531,
                                    "count": 63226,
                                    "self": 5.027841130777233,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 111.87682138918808,
                                            "count": 62551,
                                            "self": 111.87682138918808
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.9312773649353403,
                                    "count": 63226,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 2021.7705289009664,
                                            "count": 63226,
                                            "is_parallel": true,
                                            "self": 862.1165915930906,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.0019063569998252206,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0006194679990585428,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.0012868890007666778,
                                                                    "count": 8,
                                                                    "is_parallel": true,
                                                                    "self": 0.0012868890007666778
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.09352730000045995,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0005239249994701822,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0005590920000031474,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0005590920000031474
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.090812277000623,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.090812277000623
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0016320060003636172,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.000396606001231703,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0012353999991319142,
                                                                            "count": 8,
                                                                            "is_parallel": true,
                                                                            "self": 0.0012353999991319142
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 1159.6539373078758,
                                                    "count": 63225,
                                                    "is_parallel": true,
                                                    "self": 31.2926483210058,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 24.47452290309593,
                                                            "count": 63225,
                                                            "is_parallel": true,
                                                            "self": 24.47452290309593
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 1008.2148882498814,
                                                            "count": 63225,
                                                            "is_parallel": true,
                                                            "self": 1008.2148882498814
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 95.67187783389272,
                                                            "count": 63225,
                                                            "is_parallel": true,
                                                            "self": 20.974489489864027,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 74.6973883440287,
                                                                    "count": 505800,
                                                                    "is_parallel": true,
                                                                    "self": 74.6973883440287
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        },
                        "trainer_advance": {
                            "total": 617.1193965139109,
                            "count": 63226,
                            "self": 2.7525204688581653,
                            "children": {
                                "process_trajectory": {
                                    "total": 123.16826495605983,
                                    "count": 63226,
                                    "self": 122.94530077006038,
                                    "children": {
                                        "RLTrainer._checkpoint": {
                                            "total": 0.22296418599944445,
                                            "count": 2,
                                            "self": 0.22296418599944445
                                        }
                                    }
                                },
                                "_update_policy": {
                                    "total": 491.19861108899295,
                                    "count": 117,
                                    "self": 311.34240499203224,
                                    "children": {
                                        "TorchPPOOptimizer.update": {
                                            "total": 179.8562060969607,
                                            "count": 23139,
                                            "self": 179.8562060969607
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 9.590003173798323e-07,
                    "count": 1,
                    "self": 9.590003173798323e-07
                },
                "TrainerController._save_models": {
                    "total": 0.09918122199997015,
                    "count": 1,
                    "self": 0.0013517439992938307,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.09782947800067632,
                            "count": 1,
                            "self": 0.09782947800067632
                        }
                    }
                }
            }
        }
    }
}
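
For reference, a minimal sketch (not part of the original log) of one way to read this file with the Python standard library: the gauges hold training statistics, and the rest of the object is a recursive timer tree whose nodes carry total seconds, call counts, and nested children. The relative path below is an assumption based on this repository's layout.

import json

# Load the timer log; the path assumes the script runs from the repo root.
with open("run_logs/timers.json") as f:
    data = json.load(f)

# Gauges record training statistics, e.g. the mean cumulative reward.
reward = data["gauges"]["Pyramids.Environment.CumulativeReward.mean"]
print(f"mean cumulative reward: {reward['value']:.3f} "
      f"(min {reward['min']:.3f}, max {reward['max']:.3f})")

def walk(node, name="root", depth=0):
    # Each timer node records total seconds, a call count, and children;
    # extra keys on the root ("gauges", "metadata") are simply ignored.
    print(f"{'    ' * depth}{name}: {node.get('total', 0.0):.2f}s "
          f"x{node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(data)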