ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4037810564041138,
"min": 1.4037810564041138,
"max": 1.425986647605896,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69850.7421875,
"min": 68807.46875,
"max": 77534.203125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 77.76498422712933,
"min": 74.69545454545455,
"max": 390.5625,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49303.0,
"min": 49293.0,
"max": 50249.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999992.0,
"min": 49911.0,
"max": 1999992.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999992.0,
"min": 49911.0,
"max": 1999992.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.515214681625366,
"min": 0.03152647614479065,
"max": 2.515214681625366,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1594.6461181640625,
"min": 4.003862380981445,
"max": 1637.996826171875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8700974560122385,
"min": 1.989541540465017,
"max": 3.928014337563243,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2453.641787111759,
"min": 252.67177563905716,
"max": 2575.007646560669,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8700974560122385,
"min": 1.989541540465017,
"max": 3.928014337563243,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2453.641787111759,
"min": 252.67177563905716,
"max": 2575.007646560669,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017190775419355004,
"min": 0.012048113543520837,
"max": 0.021088772183065884,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05157232625806501,
"min": 0.024096227087041674,
"max": 0.05862154140477287,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05633601049582163,
"min": 0.023583166021853688,
"max": 0.062231643311679366,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1690080314874649,
"min": 0.047166332043707376,
"max": 0.1766928769648075,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4530488490166784e-06,
"min": 3.4530488490166784e-06,
"max": 0.000295310176563275,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0359146547050035e-05,
"min": 1.0359146547050035e-05,
"max": 0.0008441458686180498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115098333333335,
"min": 0.10115098333333335,
"max": 0.19843672500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30345295000000005,
"min": 0.20744095,
"max": 0.58138195,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.743406833333348e-05,
"min": 6.743406833333348e-05,
"max": 0.004921992577500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020230220500000044,
"min": 0.00020230220500000044,
"max": 0.014070959305000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670705313",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670707468"
},
"total": 2155.814031908,
"count": 1,
"self": 0.4391674209996381,
"children": {
"run_training.setup": {
"total": 0.12013414599999805,
"count": 1,
"self": 0.12013414599999805
},
"TrainerController.start_learning": {
"total": 2155.254730341,
"count": 1,
"self": 3.8985743829475723,
"children": {
"TrainerController._reset_env": {
"total": 11.032539977999988,
"count": 1,
"self": 11.032539977999988
},
"TrainerController.advance": {
"total": 2140.2095230340524,
"count": 232658,
"self": 3.9305065840139832,
"children": {
"env_step": {
"total": 1672.6334387290326,
"count": 232658,
"self": 1402.6431136959673,
"children": {
"SubprocessEnvManager._take_step": {
"total": 267.46152558500114,
"count": 232658,
"self": 14.164664692954716,
"children": {
"TorchPolicy.evaluate": {
"total": 253.29686089204642,
"count": 222878,
"self": 64.07534870909495,
"children": {
"TorchPolicy.sample_actions": {
"total": 189.22151218295147,
"count": 222878,
"self": 189.22151218295147
}
}
}
}
},
"workers": {
"total": 2.528799448064319,
"count": 232658,
"self": 0.0,
"children": {
"worker_root": {
"total": 2147.6833838110515,
"count": 232658,
"is_parallel": true,
"self": 996.2486695780408,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020323669999697813,
"count": 1,
"is_parallel": true,
"self": 0.0003148229999965224,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001717543999973259,
"count": 2,
"is_parallel": true,
"self": 0.001717543999973259
}
}
},
"UnityEnvironment.step": {
"total": 0.02740084399999887,
"count": 1,
"is_parallel": true,
"self": 0.00028881000002911605,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002049189999979717,
"count": 1,
"is_parallel": true,
"self": 0.0002049189999979717
},
"communicator.exchange": {
"total": 0.02617830799999865,
"count": 1,
"is_parallel": true,
"self": 0.02617830799999865
},
"steps_from_proto": {
"total": 0.0007288069999731306,
"count": 1,
"is_parallel": true,
"self": 0.00023418799997898532,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004946189999941453,
"count": 2,
"is_parallel": true,
"self": 0.0004946189999941453
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1151.4347142330107,
"count": 232657,
"is_parallel": true,
"self": 34.28079139919214,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.01986746092678,
"count": 232657,
"is_parallel": true,
"self": 74.01986746092678
},
"communicator.exchange": {
"total": 951.5762621139231,
"count": 232657,
"is_parallel": true,
"self": 951.5762621139231
},
"steps_from_proto": {
"total": 91.55779325896879,
"count": 232657,
"is_parallel": true,
"self": 37.60576332789367,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.95202993107512,
"count": 465314,
"is_parallel": true,
"self": 53.95202993107512
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 463.6455777210057,
"count": 232658,
"self": 6.032147097014729,
"children": {
"process_trajectory": {
"total": 144.64725265699155,
"count": 232658,
"self": 144.16816420799103,
"children": {
"RLTrainer._checkpoint": {
"total": 0.47908844900052827,
"count": 4,
"self": 0.47908844900052827
}
}
},
"_update_policy": {
"total": 312.9661779669994,
"count": 97,
"self": 259.7302032199961,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.2359747470033,
"count": 2910,
"self": 53.2359747470033
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.020000106829684e-07,
"count": 1,
"self": 9.020000106829684e-07
},
"TrainerController._save_models": {
"total": 0.11409204400024464,
"count": 1,
"self": 0.0026777110001603432,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1114143330000843,
"count": 1,
"self": 0.1114143330000843
}
}
}
}
}
}
}