{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4107227325439453,
"min": 1.4107227325439453,
"max": 1.4295623302459717,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70352.7421875,
"min": 68591.4921875,
"max": 78600.265625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 94.69157088122606,
"min": 83.12289562289563,
"max": 405.91129032258067,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49429.0,
"min": 49179.0,
"max": 50333.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999964.0,
"min": 49788.0,
"max": 1999964.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999964.0,
"min": 49788.0,
"max": 1999964.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.353489398956299,
"min": 0.10842138528823853,
"max": 2.4275991916656494,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1228.521484375,
"min": 13.335830688476562,
"max": 1399.5899658203125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6913074645502815,
"min": 1.6421974119616718,
"max": 3.924011243150589,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1926.862496495247,
"min": 201.99028167128563,
"max": 2190.3236235380173,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6913074645502815,
"min": 1.6421974119616718,
"max": 3.924011243150589,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1926.862496495247,
"min": 201.99028167128563,
"max": 2190.3236235380173,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01588782660801371,
"min": 0.01407488593152569,
"max": 0.021118302741767063,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03177565321602742,
"min": 0.02814977186305138,
"max": 0.05596572636665466,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0534738602116704,
"min": 0.022025457428147394,
"max": 0.059929059694210696,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1069477204233408,
"min": 0.04405091485629479,
"max": 0.1797871790826321,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.474523508525011e-06,
"min": 4.474523508525011e-06,
"max": 0.00029533740155420006,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.949047017050022e-06,
"min": 8.949047017050022e-06,
"max": 0.0008438274187242001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101491475,
"min": 0.101491475,
"max": 0.19844580000000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20298295,
"min": 0.20298295,
"max": 0.5812758,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.442460250000018e-05,
"min": 8.442460250000018e-05,
"max": 0.004922445419999998,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016884920500000037,
"min": 0.00016884920500000037,
"max": 0.01406566242,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670668390",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670670850"
},
"total": 2459.972561273,
"count": 1,
"self": 0.7786739059997672,
"children": {
"run_training.setup": {
"total": 0.12544310200007658,
"count": 1,
"self": 0.12544310200007658
},
"TrainerController.start_learning": {
"total": 2459.068444265,
"count": 1,
"self": 4.268754074951175,
"children": {
"TrainerController._reset_env": {
"total": 12.757687393000083,
"count": 1,
"self": 12.757687393000083
},
"TrainerController.advance": {
"total": 2441.842756204049,
"count": 231364,
"self": 4.526452754152615,
"children": {
"env_step": {
"total": 1909.3663134559188,
"count": 231364,
"self": 1603.289718851905,
"children": {
"SubprocessEnvManager._take_step": {
"total": 303.22230513199895,
"count": 231364,
"self": 15.805036388935378,
"children": {
"TorchPolicy.evaluate": {
"total": 287.4172687430636,
"count": 222909,
"self": 70.97778242104732,
"children": {
"TorchPolicy.sample_actions": {
"total": 216.43948632201625,
"count": 222909,
"self": 216.43948632201625
}
}
}
}
},
"workers": {
"total": 2.854289472014898,
"count": 231364,
"self": 0.0,
"children": {
"worker_root": {
"total": 2450.446023813053,
"count": 231364,
"is_parallel": true,
"self": 1129.8269741810504,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0061093709999795465,
"count": 1,
"is_parallel": true,
"self": 0.0003916910000043572,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005717679999975189,
"count": 2,
"is_parallel": true,
"self": 0.005717679999975189
}
}
},
"UnityEnvironment.step": {
"total": 0.02859458000000359,
"count": 1,
"is_parallel": true,
"self": 0.0002942849999953978,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002047370001037052,
"count": 1,
"is_parallel": true,
"self": 0.0002047370001037052
},
"communicator.exchange": {
"total": 0.027290732999972533,
"count": 1,
"is_parallel": true,
"self": 0.027290732999972533
},
"steps_from_proto": {
"total": 0.0008048249999319523,
"count": 1,
"is_parallel": true,
"self": 0.0002580369998668175,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005467880000651348,
"count": 2,
"is_parallel": true,
"self": 0.0005467880000651348
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1320.6190496320028,
"count": 231363,
"is_parallel": true,
"self": 37.969915015889455,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.11812727010079,
"count": 231363,
"is_parallel": true,
"self": 82.11812727010079
},
"communicator.exchange": {
"total": 1098.3394021139256,
"count": 231363,
"is_parallel": true,
"self": 1098.3394021139256
},
"steps_from_proto": {
"total": 102.19160523208689,
"count": 231363,
"is_parallel": true,
"self": 41.881819970173524,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.30978526191336,
"count": 462726,
"is_parallel": true,
"self": 60.30978526191336
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 527.9499899939775,
"count": 231364,
"self": 6.80893206404437,
"children": {
"process_trajectory": {
"total": 157.4056366399352,
"count": 231364,
"self": 156.83135603093524,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5742806089999704,
"count": 4,
"self": 0.5742806089999704
}
}
},
"_update_policy": {
"total": 363.73542128999793,
"count": 96,
"self": 305.915886920993,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.81953436900494,
"count": 2880,
"self": 57.81953436900494
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4739998732693493e-06,
"count": 1,
"self": 1.4739998732693493e-06
},
"TrainerController._save_models": {
"total": 0.19924511899989739,
"count": 1,
"self": 0.003003160999924148,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19624195799997324,
"count": 1,
"self": 0.19624195799997324
}
}
}
}
}
}
}