{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5188910365104675,
"min": 0.5177847743034363,
"max": 1.397757649421692,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15541.82421875,
"min": 15516.9736328125,
"max": 42402.375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989881.0,
"min": 29952.0,
"max": 989881.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989881.0,
"min": 29952.0,
"max": 989881.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.45750558376312256,
"min": -0.0848151370882988,
"max": 0.4844558835029602,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 122.61149597167969,
"min": -20.440448760986328,
"max": 130.31863403320312,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.029161421582102776,
"min": -0.029161421582102776,
"max": 0.39571771025657654,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -7.815260887145996,
"min": -7.815260887145996,
"max": 93.78509521484375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06792384513065651,
"min": 0.06490958266658707,
"max": 0.0734338414211593,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0188576769598476,
"min": 0.48358025351283673,
"max": 1.044565084176914,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013090611569502761,
"min": 0.0012658782114416747,
"max": 0.01433566063062871,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19635917354254143,
"min": 0.008861147480091723,
"max": 0.20069924882880194,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.571437476220002e-06,
"min": 7.571437476220002e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011357156214330002,
"min": 0.00011357156214330002,
"max": 0.0037592242469252995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252378000000002,
"min": 0.10252378000000002,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5378567000000003,
"min": 1.3691136000000002,
"max": 2.6530747,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000262125622,
"min": 0.000262125622,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00393188433,
"min": 0.00393188433,
"max": 0.12532216252999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009280054830014706,
"min": 0.009280054830014706,
"max": 0.4777162969112396,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.139200821518898,
"min": 0.13153129816055298,
"max": 3.3440141677856445,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 419.57142857142856,
"min": 383.35135135135135,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29370.0,
"min": 15984.0,
"max": 33449.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.437545693559306,
"min": -1.0000000521540642,
"max": 1.51210956234638,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 100.62819854915142,
"min": -32.000001668930054,
"max": 111.62999815493822,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.437545693559306,
"min": -1.0000000521540642,
"max": 1.51210956234638,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 100.62819854915142,
"min": -32.000001668930054,
"max": 111.62999815493822,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.039889879483546664,
"min": 0.039889879483546664,
"max": 9.342226451262832,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.7922915638482664,
"min": 2.7922915638482664,
"max": 149.4756232202053,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1712424698",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1712427072"
},
"total": 2374.1065205319996,
"count": 1,
"self": 0.8988956339994729,
"children": {
"run_training.setup": {
"total": 0.05122429600010037,
"count": 1,
"self": 0.05122429600010037
},
"TrainerController.start_learning": {
"total": 2373.156400602,
"count": 1,
"self": 1.8948408220157944,
"children": {
"TrainerController._reset_env": {
"total": 2.12626107899996,
"count": 1,
"self": 2.12626107899996
},
"TrainerController.advance": {
"total": 2368.9788220879836,
"count": 63800,
"self": 1.9723670110611238,
"children": {
"env_step": {
"total": 1718.5323056149036,
"count": 63800,
"self": 1554.2027717299395,
"children": {
"SubprocessEnvManager._take_step": {
"total": 163.11332203897496,
"count": 63800,
"self": 6.11540640999533,
"children": {
"TorchPolicy.evaluate": {
"total": 156.99791562897963,
"count": 62569,
"self": 156.99791562897963
}
}
},
"workers": {
"total": 1.2162118459891644,
"count": 63800,
"self": 0.0,
"children": {
"worker_root": {
"total": 2366.7908769890164,
"count": 63800,
"is_parallel": true,
"self": 962.5893403780196,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021733149999363377,
"count": 1,
"is_parallel": true,
"self": 0.0006604779998724553,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015128370000638824,
"count": 8,
"is_parallel": true,
"self": 0.0015128370000638824
}
}
},
"UnityEnvironment.step": {
"total": 0.05200437600001351,
"count": 1,
"is_parallel": true,
"self": 0.0006921819999661238,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004368110000996239,
"count": 1,
"is_parallel": true,
"self": 0.0004368110000996239
},
"communicator.exchange": {
"total": 0.049203923000050054,
"count": 1,
"is_parallel": true,
"self": 0.049203923000050054
},
"steps_from_proto": {
"total": 0.0016714599998977064,
"count": 1,
"is_parallel": true,
"self": 0.0003625279998686892,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013089320000290172,
"count": 8,
"is_parallel": true,
"self": 0.0013089320000290172
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1404.2015366109968,
"count": 63799,
"is_parallel": true,
"self": 40.03658548795215,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.603799241987417,
"count": 63799,
"is_parallel": true,
"self": 25.603799241987417
},
"communicator.exchange": {
"total": 1226.304475118033,
"count": 63799,
"is_parallel": true,
"self": 1226.304475118033
},
"steps_from_proto": {
"total": 112.25667676302419,
"count": 63799,
"is_parallel": true,
"self": 24.2227383300567,
"children": {
"_process_rank_one_or_two_observation": {
"total": 88.03393843296749,
"count": 510392,
"is_parallel": true,
"self": 88.03393843296749
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 648.4741494620189,
"count": 63800,
"self": 3.5333817869613995,
"children": {
"process_trajectory": {
"total": 135.71939528406006,
"count": 63800,
"self": 135.47919080605948,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2402044780005781,
"count": 2,
"self": 0.2402044780005781
}
}
},
"_update_policy": {
"total": 509.22137239099743,
"count": 454,
"self": 296.9567129909599,
"children": {
"TorchPPOOptimizer.update": {
"total": 212.26465940003754,
"count": 22758,
"self": 212.26465940003754
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.083999788737856e-06,
"count": 1,
"self": 2.083999788737856e-06
},
"TrainerController._save_models": {
"total": 0.15647452900066128,
"count": 1,
"self": 0.0025207230009982595,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15395380599966302,
"count": 1,
"self": 0.15395380599966302
}
}
}
}
}
}
}