{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4053136110305786,
"min": 1.4053136110305786,
"max": 1.4298957586288452,
"count": 80
},
"Huggy.Policy.Entropy.sum": {
"value": 69692.3125,
"min": 69305.859375,
"max": 77943.9375,
"count": 80
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.56115107913669,
"min": 86.79432624113475,
"max": 393.06299212598424,
"count": 80
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49240.0,
"min": 48952.0,
"max": 49939.0,
"count": 80
},
"Huggy.Step.mean": {
"value": 1999908.0,
"min": 49763.0,
"max": 1999908.0,
"count": 80
},
"Huggy.Step.sum": {
"value": 1999908.0,
"min": 49763.0,
"max": 1999908.0,
"count": 80
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4190115928649902,
"min": -0.018372971564531326,
"max": 2.4488587379455566,
"count": 80
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1344.970458984375,
"min": -2.3149943351745605,
"max": 1366.080810546875,
"count": 80
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.738660923975835,
"min": 1.7561391386247815,
"max": 3.943376674616741,
"count": 80
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2078.695473730564,
"min": 221.2735314667225,
"max": 2169.8029587864876,
"count": 80
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.738660923975835,
"min": 1.7561391386247815,
"max": 3.943376674616741,
"count": 80
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2078.695473730564,
"min": 221.2735314667225,
"max": 2169.8029587864876,
"count": 80
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016244568865254726,
"min": 0.014629874254266421,
"max": 0.019753994263980227,
"count": 80
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04873370659576418,
"min": 0.029259748508532843,
"max": 0.0550822067935466,
"count": 80
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06020368718438678,
"min": 0.022620811220258476,
"max": 0.06140423781341977,
"count": 80
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18061106155316034,
"min": 0.04524162244051695,
"max": 0.1842127134402593,
"count": 80
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.507198830966664e-06,
"min": 3.507198830966664e-06,
"max": 0.0002952727515757499,
"count": 80
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0521596492899992e-05,
"min": 1.0521596492899992e-05,
"max": 0.0008440794186402,
"count": 80
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10116903333333334,
"min": 0.10116903333333334,
"max": 0.19842425,
"count": 80
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30350710000000003,
"min": 0.20753129999999997,
"max": 0.5813598000000001,
"count": 80
},
"Huggy.Policy.Beta.mean": {
"value": 6.833476333333327e-05,
"min": 6.833476333333327e-05,
"max": 0.0049213700750000006,
"count": 80
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020500428999999982,
"min": 0.00020500428999999982,
"max": 0.01406985402,
"count": 80
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 80
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 80
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678356527",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678358922"
},
"total": 2394.843785806,
"count": 1,
"self": 0.44788424000034865,
"children": {
"run_training.setup": {
"total": 0.17183435599997665,
"count": 1,
"self": 0.17183435599997665
},
"TrainerController.start_learning": {
"total": 2394.2240672099997,
"count": 1,
"self": 4.2973906599168,
"children": {
"TrainerController._reset_env": {
"total": 10.917074234999973,
"count": 1,
"self": 10.917074234999973
},
"TrainerController.advance": {
"total": 2378.9017313960826,
"count": 231434,
"self": 4.553559826068067,
"children": {
"env_step": {
"total": 1854.2095438499791,
"count": 231434,
"self": 1547.7315536520537,
"children": {
"SubprocessEnvManager._take_step": {
"total": 303.62991817293414,
"count": 231434,
"self": 17.14592465595649,
"children": {
"TorchPolicy.evaluate": {
"total": 286.48399351697765,
"count": 222899,
"self": 286.48399351697765
}
}
},
"workers": {
"total": 2.8480720249914384,
"count": 231434,
"self": 0.0,
"children": {
"worker_root": {
"total": 2386.2847242220755,
"count": 231434,
"is_parallel": true,
"self": 1129.7847676090996,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009784049999552735,
"count": 1,
"is_parallel": true,
"self": 0.00032962199986741325,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006487830000878603,
"count": 2,
"is_parallel": true,
"self": 0.0006487830000878603
}
}
},
"UnityEnvironment.step": {
"total": 0.03207003399995756,
"count": 1,
"is_parallel": true,
"self": 0.00027573299996674905,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00024355899995498476,
"count": 1,
"is_parallel": true,
"self": 0.00024355899995498476
},
"communicator.exchange": {
"total": 0.03089702399995531,
"count": 1,
"is_parallel": true,
"self": 0.03089702399995531
},
"steps_from_proto": {
"total": 0.0006537180000805165,
"count": 1,
"is_parallel": true,
"self": 0.00018966300001466152,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000464055000065855,
"count": 2,
"is_parallel": true,
"self": 0.000464055000065855
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1256.499956612976,
"count": 231433,
"is_parallel": true,
"self": 37.06242707310298,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.70556047488799,
"count": 231433,
"is_parallel": true,
"self": 76.70556047488799
},
"communicator.exchange": {
"total": 1054.1291853910016,
"count": 231433,
"is_parallel": true,
"self": 1054.1291853910016
},
"steps_from_proto": {
"total": 88.60278367398337,
"count": 231433,
"is_parallel": true,
"self": 33.02350563293055,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.57927804105282,
"count": 462866,
"is_parallel": true,
"self": 55.57927804105282
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 520.1386277200355,
"count": 231434,
"self": 6.860276119942341,
"children": {
"process_trajectory": {
"total": 145.18860303009262,
"count": 231434,
"self": 143.95428617009281,
"children": {
"RLTrainer._checkpoint": {
"total": 1.234316859999808,
"count": 10,
"self": 1.234316859999808
}
}
},
"_update_policy": {
"total": 368.08974857000055,
"count": 97,
"self": 307.00682503100757,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.082923538992986,
"count": 2910,
"self": 61.082923538992986
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0660000953066628e-06,
"count": 1,
"self": 1.0660000953066628e-06
},
"TrainerController._save_models": {
"total": 0.10786985300001106,
"count": 1,
"self": 0.0021225099999355734,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10574734300007549,
"count": 1,
"self": 0.10574734300007549
}
}
}
}
}
}
}