{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5779170393943787,
"min": 0.5779170393943787,
"max": 1.442322850227356,
"count": 20
},
"Pyramids.Policy.Entropy.sum": {
"value": 17411.484375,
"min": 17411.484375,
"max": 43754.3046875,
"count": 20
},
"Pyramids.Step.mean": {
"value": 599929.0,
"min": 29952.0,
"max": 599929.0,
"count": 20
},
"Pyramids.Step.sum": {
"value": 599929.0,
"min": 29952.0,
"max": 599929.0,
"count": 20
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.18661844730377197,
"min": -0.11054674535989761,
"max": 0.18661844730377197,
"count": 20
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 47.587703704833984,
"min": -26.641765594482422,
"max": 47.587703704833984,
"count": 20
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.016434624791145325,
"min": 0.016434624791145325,
"max": 0.35958778858184814,
"count": 20
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.190829277038574,
"min": 4.190829277038574,
"max": 85.22230529785156,
"count": 20
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06422510723905743,
"min": 0.06422510723905743,
"max": 0.0736837605644214,
"count": 20
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.8991515013468041,
"min": 0.5157863239509498,
"max": 1.0369858075282536,
"count": 20
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011843530881174876,
"min": 0.00031659532405810796,
"max": 0.011843530881174876,
"count": 20
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16580943233644826,
"min": 0.004432334536813512,
"max": 0.16580943233644826,
"count": 20
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.993061621392858e-06,
"min": 7.993061621392858e-06,
"max": 0.0002919177169798095,
"count": 20
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011190286269950001,
"min": 0.00011190286269950001,
"max": 0.0032213517262161665,
"count": 20
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10266432142857143,
"min": 0.10266432142857143,
"max": 0.1973059047619048,
"count": 20
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4373005,
"min": 1.3811413333333336,
"max": 2.3737838333333334,
"count": 20
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00027616571071428575,
"min": 0.00027616571071428575,
"max": 0.009730859885714286,
"count": 20
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038663199500000005,
"min": 0.0038663199500000005,
"max": 0.10740100495,
"count": 20
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0190260112285614,
"min": 0.0190260112285614,
"max": 0.4770812690258026,
"count": 20
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2663641571998596,
"min": 0.2663641571998596,
"max": 3.339568853378296,
"count": 20
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 642.1111111111111,
"min": 642.1111111111111,
"max": 999.0,
"count": 20
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28895.0,
"min": 15984.0,
"max": 33080.0,
"count": 20
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8393434482100217,
"min": -1.0000000521540642,
"max": 0.8393434482100217,
"count": 20
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 38.609798617661,
"min": -29.633201643824577,
"max": 38.609798617661,
"count": 20
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8393434482100217,
"min": -1.0000000521540642,
"max": 0.8393434482100217,
"count": 20
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 38.609798617661,
"min": -29.633201643824577,
"max": 38.609798617661,
"count": 20
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.12539062814080945,
"min": 0.12539062814080945,
"max": 10.89297050703317,
"count": 20
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.767968894477235,
"min": 5.767968894477235,
"max": 174.2875281125307,
"count": 20
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680166888",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680168061"
},
"total": 1173.0982683469997,
"count": 1,
"self": 0.9909899219996987,
"children": {
"run_training.setup": {
"total": 0.11148743800004013,
"count": 1,
"self": 0.11148743800004013
},
"TrainerController.start_learning": {
"total": 1171.995790987,
"count": 1,
"self": 0.794952939011182,
"children": {
"TrainerController._reset_env": {
"total": 6.782739793000019,
"count": 1,
"self": 6.782739793000019
},
"TrainerController.advance": {
"total": 1164.1038740839886,
"count": 38026,
"self": 0.8418140149165083,
"children": {
"env_step": {
"total": 791.7397666659617,
"count": 38026,
"self": 726.5317094160187,
"children": {
"SubprocessEnvManager._take_step": {
"total": 64.7211457540111,
"count": 38026,
"self": 2.8574207559549905,
"children": {
"TorchPolicy.evaluate": {
"total": 61.863724998056114,
"count": 37568,
"self": 61.863724998056114
}
}
},
"workers": {
"total": 0.48691149593196315,
"count": 38026,
"self": 0.0,
"children": {
"worker_root": {
"total": 1169.0864541419392,
"count": 38026,
"is_parallel": true,
"self": 510.77933398292316,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018568139998933475,
"count": 1,
"is_parallel": true,
"self": 0.0006395450000127312,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012172689998806163,
"count": 8,
"is_parallel": true,
"self": 0.0012172689998806163
}
}
},
"UnityEnvironment.step": {
"total": 0.0762697529999059,
"count": 1,
"is_parallel": true,
"self": 0.0005124630001773767,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048429999969812343,
"count": 1,
"is_parallel": true,
"self": 0.00048429999969812343
},
"communicator.exchange": {
"total": 0.0735128920000534,
"count": 1,
"is_parallel": true,
"self": 0.0735128920000534
},
"steps_from_proto": {
"total": 0.0017600979999770061,
"count": 1,
"is_parallel": true,
"self": 0.00036175599962007254,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013983420003569336,
"count": 8,
"is_parallel": true,
"self": 0.0013983420003569336
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 658.307120159016,
"count": 38025,
"is_parallel": true,
"self": 18.40534018176413,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.86856746406238,
"count": 38025,
"is_parallel": true,
"self": 13.86856746406238
},
"communicator.exchange": {
"total": 570.3618061491584,
"count": 38025,
"is_parallel": true,
"self": 570.3618061491584
},
"steps_from_proto": {
"total": 55.6714063640311,
"count": 38025,
"is_parallel": true,
"self": 11.89616714106387,
"children": {
"_process_rank_one_or_two_observation": {
"total": 43.77523922296723,
"count": 304200,
"is_parallel": true,
"self": 43.77523922296723
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 371.5222934031103,
"count": 38026,
"self": 1.4451327371893967,
"children": {
"process_trajectory": {
"total": 71.76933010892208,
"count": 38026,
"self": 71.66158862192151,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10774148700056685,
"count": 1,
"self": 0.10774148700056685
}
}
},
"_update_policy": {
"total": 298.30783055699885,
"count": 264,
"self": 189.86364553996555,
"children": {
"TorchPPOOptimizer.update": {
"total": 108.4441850170333,
"count": 13671,
"self": 108.4441850170333
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3910002962802537e-06,
"count": 1,
"self": 1.3910002962802537e-06
},
"TrainerController._save_models": {
"total": 0.3142227799999091,
"count": 1,
"self": 0.0018257340007039602,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31239704599920515,
"count": 1,
"self": 0.31239704599920515
}
}
}
}
}
}
}