{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3872012197971344,
"min": 0.3747113049030304,
"max": 1.4528653621673584,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11702.76953125,
"min": 11199.37109375,
"max": 44074.125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989903.0,
"min": 29952.0,
"max": 989903.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989903.0,
"min": 29952.0,
"max": 989903.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6368499398231506,
"min": -0.17317768931388855,
"max": 0.7622795104980469,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 183.41278076171875,
"min": -41.043113708496094,
"max": 221.82333374023438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0061710006557404995,
"min": -0.00518934428691864,
"max": 0.35376763343811035,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.7772481441497803,
"min": -1.4426376819610596,
"max": 83.87799835205078,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07160436773109537,
"min": 0.06653333066302973,
"max": 0.07313094476176914,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0024611482353352,
"min": 0.47940585663827834,
"max": 1.0750013964008722,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014009311910753247,
"min": 0.0008748433836523896,
"max": 0.016045455221111685,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19613036675054546,
"min": 0.0061239036855667275,
"max": 0.23512779345037418,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.356354690771427e-06,
"min": 7.356354690771427e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010298896567079998,
"min": 0.00010298896567079998,
"max": 0.0035070896309702,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245208571428573,
"min": 0.10245208571428573,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4343292,
"min": 1.3691136000000002,
"max": 2.5690297999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002549633628571428,
"min": 0.0002549633628571428,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035694870799999993,
"min": 0.0035694870799999993,
"max": 0.11692607702,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009113323874771595,
"min": 0.009113323874771595,
"max": 0.5228285193443298,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12758652865886688,
"min": 0.12758652865886688,
"max": 3.659799575805664,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 288.921568627451,
"min": 256.9380530973451,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29470.0,
"min": 15984.0,
"max": 33179.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6718529215922542,
"min": -1.0000000521540642,
"max": 1.7416696277047907,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 170.52899800240993,
"min": -32.000001668930054,
"max": 201.5527983903885,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6718529215922542,
"min": -1.0000000521540642,
"max": 1.7416696277047907,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 170.52899800240993,
"min": -32.000001668930054,
"max": 201.5527983903885,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.027393831923665644,
"min": 0.026261360240372844,
"max": 11.211362191475928,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.7941708562138956,
"min": 2.7941708562138956,
"max": 179.38179506361485,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673670707",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673672943"
},
"total": 2235.5818532040003,
"count": 1,
"self": 0.49098040400031095,
"children": {
"run_training.setup": {
"total": 0.11482328300002109,
"count": 1,
"self": 0.11482328300002109
},
"TrainerController.start_learning": {
"total": 2234.976049517,
"count": 1,
"self": 1.320331274997443,
"children": {
"TrainerController._reset_env": {
"total": 10.010990612,
"count": 1,
"self": 10.010990612
},
"TrainerController.advance": {
"total": 2223.543341622002,
"count": 64250,
"self": 1.332534928917994,
"children": {
"env_step": {
"total": 1527.537925132046,
"count": 64250,
"self": 1418.8975299270314,
"children": {
"SubprocessEnvManager._take_step": {
"total": 107.79130019098744,
"count": 64250,
"self": 4.352860237020707,
"children": {
"TorchPolicy.evaluate": {
"total": 103.43843995396674,
"count": 62580,
"self": 34.86729878791476,
"children": {
"TorchPolicy.sample_actions": {
"total": 68.57114116605197,
"count": 62580,
"self": 68.57114116605197
}
}
}
}
},
"workers": {
"total": 0.8490950140271138,
"count": 64250,
"self": 0.0,
"children": {
"worker_root": {
"total": 2230.717939842983,
"count": 64250,
"is_parallel": true,
"self": 914.6002333290176,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006549218000031942,
"count": 1,
"is_parallel": true,
"self": 0.003949857000066004,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025993609999659384,
"count": 8,
"is_parallel": true,
"self": 0.0025993609999659384
}
}
},
"UnityEnvironment.step": {
"total": 0.06243445900003053,
"count": 1,
"is_parallel": true,
"self": 0.0004868220000275869,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004444169999828773,
"count": 1,
"is_parallel": true,
"self": 0.0004444169999828773
},
"communicator.exchange": {
"total": 0.059882521000020006,
"count": 1,
"is_parallel": true,
"self": 0.059882521000020006
},
"steps_from_proto": {
"total": 0.0016206990000000587,
"count": 1,
"is_parallel": true,
"self": 0.00043429400005834395,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011864049999417148,
"count": 8,
"is_parallel": true,
"self": 0.0011864049999417148
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1316.1177065139655,
"count": 64249,
"is_parallel": true,
"self": 28.755428988956737,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.727184803023306,
"count": 64249,
"is_parallel": true,
"self": 22.727184803023306
},
"communicator.exchange": {
"total": 1169.9402648049918,
"count": 64249,
"is_parallel": true,
"self": 1169.9402648049918
},
"steps_from_proto": {
"total": 94.69482791699386,
"count": 64249,
"is_parallel": true,
"self": 22.445723089830324,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.24910482716353,
"count": 513992,
"is_parallel": true,
"self": 72.24910482716353
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 694.6728815610379,
"count": 64250,
"self": 2.571614786048258,
"children": {
"process_trajectory": {
"total": 151.73174673799093,
"count": 64250,
"self": 151.5275948639907,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2041518740002175,
"count": 2,
"self": 0.2041518740002175
}
}
},
"_update_policy": {
"total": 540.3695200369987,
"count": 445,
"self": 207.58283358500591,
"children": {
"TorchPPOOptimizer.update": {
"total": 332.78668645199275,
"count": 22851,
"self": 332.78668645199275
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0350004231440835e-06,
"count": 1,
"self": 1.0350004231440835e-06
},
"TrainerController._save_models": {
"total": 0.10138497299976734,
"count": 1,
"self": 0.0015560549995825568,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09982891800018479,
"count": 1,
"self": 0.09982891800018479
}
}
}
}
}
}
}