{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4011754989624023,
"min": 1.4011754989624023,
"max": 1.4279377460479736,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70678.09375,
"min": 68451.7265625,
"max": 77722.4609375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 100.41987829614604,
"min": 95.16666666666667,
"max": 393.7165354330709,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49507.0,
"min": 49072.0,
"max": 50002.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999989.0,
"min": 49715.0,
"max": 1999989.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999989.0,
"min": 49715.0,
"max": 1999989.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.310187339782715,
"min": 0.14300993084907532,
"max": 2.434074640274048,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1138.92236328125,
"min": 18.019250869750977,
"max": 1217.2301025390625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.544467935213936,
"min": 1.7369347597871507,
"max": 3.868933779384018,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1747.4226920604706,
"min": 218.853779733181,
"max": 1884.7530118823051,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.544467935213936,
"min": 1.7369347597871507,
"max": 3.868933779384018,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1747.4226920604706,
"min": 218.853779733181,
"max": 1884.7530118823051,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.013614045628411177,
"min": 0.013614045628411177,
"max": 0.019762169700425148,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04084213688523353,
"min": 0.030807156441733242,
"max": 0.05920418343982116,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04703238560921616,
"min": 0.021136720416446528,
"max": 0.053348469796280065,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14109715682764848,
"min": 0.042273440832893056,
"max": 0.15203580719729265,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1523489492500004e-06,
"min": 3.1523489492500004e-06,
"max": 0.0002953671015442999,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.457046847750002e-06,
"min": 9.457046847750002e-06,
"max": 0.0008441601186133,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10105075000000001,
"min": 0.10105075000000001,
"max": 0.19845569999999996,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30315225,
"min": 0.20726869999999997,
"max": 0.5813866999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.243242500000004e-05,
"min": 6.243242500000004e-05,
"max": 0.0049229394299999995,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001872972750000001,
"min": 0.0001872972750000001,
"max": 0.014071196330000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1729104960",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1729107431"
},
"total": 2470.477981348,
"count": 1,
"self": 0.4368546559999231,
"children": {
"run_training.setup": {
"total": 0.06264626400002271,
"count": 1,
"self": 0.06264626400002271
},
"TrainerController.start_learning": {
"total": 2469.9784804280002,
"count": 1,
"self": 4.440259727933608,
"children": {
"TrainerController._reset_env": {
"total": 4.775880373000064,
"count": 1,
"self": 4.775880373000064
},
"TrainerController.advance": {
"total": 2460.6329226970665,
"count": 231643,
"self": 4.750528723116531,
"children": {
"env_step": {
"total": 1964.0659677489061,
"count": 231643,
"self": 1551.2075607229729,
"children": {
"SubprocessEnvManager._take_step": {
"total": 410.12422747095945,
"count": 231643,
"self": 15.481015913028841,
"children": {
"TorchPolicy.evaluate": {
"total": 394.6432115579306,
"count": 222948,
"self": 394.6432115579306
}
}
},
"workers": {
"total": 2.734179554973821,
"count": 231643,
"self": 0.0,
"children": {
"worker_root": {
"total": 2462.3782089721212,
"count": 231643,
"is_parallel": true,
"self": 1207.2119159880804,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009217960000569292,
"count": 1,
"is_parallel": true,
"self": 0.00027545899990855105,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006463370001483781,
"count": 2,
"is_parallel": true,
"self": 0.0006463370001483781
}
}
},
"UnityEnvironment.step": {
"total": 0.028796769999985372,
"count": 1,
"is_parallel": true,
"self": 0.0003742730001476957,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018592899994018808,
"count": 1,
"is_parallel": true,
"self": 0.00018592899994018808
},
"communicator.exchange": {
"total": 0.02750927799991132,
"count": 1,
"is_parallel": true,
"self": 0.02750927799991132
},
"steps_from_proto": {
"total": 0.0007272899999861693,
"count": 1,
"is_parallel": true,
"self": 0.00020285899984173739,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005244310001444319,
"count": 2,
"is_parallel": true,
"self": 0.0005244310001444319
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1255.1662929840409,
"count": 231642,
"is_parallel": true,
"self": 37.814787965226515,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.06925407494555,
"count": 231642,
"is_parallel": true,
"self": 83.06925407494555
},
"communicator.exchange": {
"total": 1044.8241998149322,
"count": 231642,
"is_parallel": true,
"self": 1044.8241998149322
},
"steps_from_proto": {
"total": 89.45805112893652,
"count": 231642,
"is_parallel": true,
"self": 33.9619102209773,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.496140907959216,
"count": 463284,
"is_parallel": true,
"self": 55.496140907959216
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 491.81642622504387,
"count": 231643,
"self": 6.509643901050481,
"children": {
"process_trajectory": {
"total": 153.280740967994,
"count": 231643,
"self": 151.90867075099516,
"children": {
"RLTrainer._checkpoint": {
"total": 1.37207021699885,
"count": 10,
"self": 1.37207021699885
}
}
},
"_update_policy": {
"total": 332.0260413559994,
"count": 97,
"self": 267.27093406300037,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.75510729299901,
"count": 2910,
"self": 64.75510729299901
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0280000424245372e-06,
"count": 1,
"self": 1.0280000424245372e-06
},
"TrainerController._save_models": {
"total": 0.12941660199976468,
"count": 1,
"self": 0.002470400999754929,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12694620100000975,
"count": 1,
"self": 0.12694620100000975
}
}
}
}
}
}
}