{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4075062274932861,
"min": 1.4075062274932861,
"max": 1.4276797771453857,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70252.859375,
"min": 68172.15625,
"max": 77430.46875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 93.97153700189753,
"min": 86.43006993006993,
"max": 372.82089552238807,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49523.0,
"min": 48933.0,
"max": 50017.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999998.0,
"min": 49973.0,
"max": 1999998.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999998.0,
"min": 49973.0,
"max": 1999998.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3477418422698975,
"min": 0.11347759515047073,
"max": 2.4558935165405273,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1237.260009765625,
"min": 15.092519760131836,
"max": 1387.40771484375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.684491062186927,
"min": 1.8534893765485376,
"max": 3.950216238848425,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1941.7267897725105,
"min": 246.5140870809555,
"max": 2261.391624212265,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.684491062186927,
"min": 1.8534893765485376,
"max": 3.950216238848425,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1941.7267897725105,
"min": 246.5140870809555,
"max": 2261.391624212265,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014808441545917756,
"min": 0.014121051169867213,
"max": 0.020664753820892657,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04442532463775327,
"min": 0.028809817308016742,
"max": 0.05455510387700997,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04976676363084052,
"min": 0.02285816151027878,
"max": 0.05545135879268249,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14930029089252156,
"min": 0.04571632302055756,
"max": 0.1648191187530756,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.568448810549999e-06,
"min": 3.568448810549999e-06,
"max": 0.0002952818265727249,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0705346431649997e-05,
"min": 1.0705346431649997e-05,
"max": 0.0008440602186466,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118945000000003,
"min": 0.10118945000000003,
"max": 0.19842727500000007,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035683500000001,
"min": 0.20755245,
"max": 0.5813534000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.935355499999996e-05,
"min": 6.935355499999996e-05,
"max": 0.0049215210225,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002080606649999999,
"min": 0.0002080606649999999,
"max": 0.014069534660000004,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678161150",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678163646"
},
"total": 2496.4464291100003,
"count": 1,
"self": 0.44391480200010847,
"children": {
"run_training.setup": {
"total": 0.12275110500002029,
"count": 1,
"self": 0.12275110500002029
},
"TrainerController.start_learning": {
"total": 2495.879763203,
"count": 1,
"self": 4.3707996789548815,
"children": {
"TrainerController._reset_env": {
"total": 10.140792363999992,
"count": 1,
"self": 10.140792363999992
},
"TrainerController.advance": {
"total": 2481.2503091920453,
"count": 232328,
"self": 4.640568390067983,
"children": {
"env_step": {
"total": 1923.6623960120096,
"count": 232328,
"self": 1609.969172685111,
"children": {
"SubprocessEnvManager._take_step": {
"total": 310.9042638818619,
"count": 232328,
"self": 16.242378362930367,
"children": {
"TorchPolicy.evaluate": {
"total": 294.6618855189315,
"count": 222946,
"self": 74.03178294189507,
"children": {
"TorchPolicy.sample_actions": {
"total": 220.63010257703644,
"count": 222946,
"self": 220.63010257703644
}
}
}
}
},
"workers": {
"total": 2.788959445036653,
"count": 232328,
"self": 0.0,
"children": {
"worker_root": {
"total": 2487.2301917300147,
"count": 232328,
"is_parallel": true,
"self": 1178.5211744239782,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.000977776000013364,
"count": 1,
"is_parallel": true,
"self": 0.0003452459999380153,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006325300000753487,
"count": 2,
"is_parallel": true,
"self": 0.0006325300000753487
}
}
},
"UnityEnvironment.step": {
"total": 0.02879418199995598,
"count": 1,
"is_parallel": true,
"self": 0.00030632799996510585,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002038330000004862,
"count": 1,
"is_parallel": true,
"self": 0.0002038330000004862
},
"communicator.exchange": {
"total": 0.02754795299995294,
"count": 1,
"is_parallel": true,
"self": 0.02754795299995294
},
"steps_from_proto": {
"total": 0.0007360680000374487,
"count": 1,
"is_parallel": true,
"self": 0.0002443820000053165,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004916860000321321,
"count": 2,
"is_parallel": true,
"self": 0.0004916860000321321
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1308.7090173060365,
"count": 232327,
"is_parallel": true,
"self": 39.290163262145825,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.7905579239034,
"count": 232327,
"is_parallel": true,
"self": 80.7905579239034
},
"communicator.exchange": {
"total": 1094.5904654809576,
"count": 232327,
"is_parallel": true,
"self": 1094.5904654809576
},
"steps_from_proto": {
"total": 94.03783063902961,
"count": 232327,
"is_parallel": true,
"self": 38.079056690127345,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.95877394890226,
"count": 464654,
"is_parallel": true,
"self": 55.95877394890226
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 552.9473447899677,
"count": 232328,
"self": 7.050016910988688,
"children": {
"process_trajectory": {
"total": 171.38932042597918,
"count": 232328,
"self": 170.16924092597964,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2200794999995423,
"count": 10,
"self": 1.2200794999995423
}
}
},
"_update_policy": {
"total": 374.5080074529999,
"count": 97,
"self": 315.267917478998,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.240089974001876,
"count": 2910,
"self": 59.240089974001876
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.350002644874621e-07,
"count": 1,
"self": 8.350002644874621e-07
},
"TrainerController._save_models": {
"total": 0.11786113299967838,
"count": 1,
"self": 0.002198993999627419,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11566213900005096,
"count": 1,
"self": 0.11566213900005096
}
}
}
}
}
}
}