{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4081580638885498,
"min": 1.4081580638885498,
"max": 1.4299359321594238,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 67739.4453125,
"min": 67739.4453125,
"max": 77998.828125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 100.86326530612244,
"min": 70.32810271041369,
"max": 396.56692913385825,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49423.0,
"min": 48826.0,
"max": 50364.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999936.0,
"min": 49741.0,
"max": 1999936.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999936.0,
"min": 49741.0,
"max": 1999936.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3723652362823486,
"min": -0.09381920099258423,
"max": 2.522627592086792,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1164.831298828125,
"min": -11.821219444274902,
"max": 1746.667236328125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.4971060803854295,
"min": 1.8632938334393123,
"max": 4.000147507945002,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1717.079085469246,
"min": 234.77502301335335,
"max": 2774.223048388958,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.4971060803854295,
"min": 1.8632938334393123,
"max": 4.000147507945002,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1717.079085469246,
"min": 234.77502301335335,
"max": 2774.223048388958,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.013962742061832817,
"min": 0.013962742061832817,
"max": 0.01997995570030374,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04188822618549845,
"min": 0.028504544915631414,
"max": 0.056037559693618275,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04967296462919977,
"min": 0.023114980043222506,
"max": 0.062390311559041345,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1490188938875993,
"min": 0.04622996008644501,
"max": 0.18717093467712403,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.788198737299993e-06,
"min": 3.788198737299993e-06,
"max": 0.00029533282655572494,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.136459621189998e-05,
"min": 1.136459621189998e-05,
"max": 0.0008441059686313498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10126269999999998,
"min": 0.10126269999999998,
"max": 0.198444275,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30378809999999995,
"min": 0.20765625000000004,
"max": 0.5813686500000003,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.300872999999995e-05,
"min": 7.300872999999995e-05,
"max": 0.0049223693225,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021902618999999982,
"min": 0.00021902618999999982,
"max": 0.014070295634999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716809780",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1716812315"
},
"total": 2535.615995832,
"count": 1,
"self": 0.43827539499989143,
"children": {
"run_training.setup": {
"total": 0.08411408799997844,
"count": 1,
"self": 0.08411408799997844
},
"TrainerController.start_learning": {
"total": 2535.093606349,
"count": 1,
"self": 4.46230303000948,
"children": {
"TrainerController._reset_env": {
"total": 3.0503491509999776,
"count": 1,
"self": 3.0503491509999776
},
"TrainerController.advance": {
"total": 2527.4621090279907,
"count": 233472,
"self": 4.738563134730612,
"children": {
"env_step": {
"total": 1993.0355513970967,
"count": 233472,
"self": 1652.2929285780579,
"children": {
"SubprocessEnvManager._take_step": {
"total": 337.7664729940626,
"count": 233472,
"self": 17.983147136046227,
"children": {
"TorchPolicy.evaluate": {
"total": 319.7833258580164,
"count": 223002,
"self": 319.7833258580164
}
}
},
"workers": {
"total": 2.9761498249762326,
"count": 233472,
"self": 0.0,
"children": {
"worker_root": {
"total": 2527.5393352370193,
"count": 233472,
"is_parallel": true,
"self": 1190.5627079170108,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008949650000431575,
"count": 1,
"is_parallel": true,
"self": 0.00024078900014501414,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006541759998981433,
"count": 2,
"is_parallel": true,
"self": 0.0006541759998981433
}
}
},
"UnityEnvironment.step": {
"total": 0.02985435999994479,
"count": 1,
"is_parallel": true,
"self": 0.000389221000091311,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002046769999424214,
"count": 1,
"is_parallel": true,
"self": 0.0002046769999424214
},
"communicator.exchange": {
"total": 0.028484461999937594,
"count": 1,
"is_parallel": true,
"self": 0.028484461999937594
},
"steps_from_proto": {
"total": 0.0007759999999734646,
"count": 1,
"is_parallel": true,
"self": 0.00019827800008442864,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005777219998890359,
"count": 2,
"is_parallel": true,
"self": 0.0005777219998890359
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1336.9766273200084,
"count": 233471,
"is_parallel": true,
"self": 40.351981717761646,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 87.71379069609907,
"count": 233471,
"is_parallel": true,
"self": 87.71379069609907
},
"communicator.exchange": {
"total": 1114.505760893121,
"count": 233471,
"is_parallel": true,
"self": 1114.505760893121
},
"steps_from_proto": {
"total": 94.40509401302666,
"count": 233471,
"is_parallel": true,
"self": 35.96100898404961,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.44408502897704,
"count": 466942,
"is_parallel": true,
"self": 58.44408502897704
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 529.6879944961636,
"count": 233472,
"self": 6.704380728222986,
"children": {
"process_trajectory": {
"total": 165.96220885594005,
"count": 233472,
"self": 164.59656214594065,
"children": {
"RLTrainer._checkpoint": {
"total": 1.365646709999396,
"count": 10,
"self": 1.365646709999396
}
}
},
"_update_policy": {
"total": 357.02140491200055,
"count": 97,
"self": 292.88909449499124,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.13231041700931,
"count": 2910,
"self": 64.13231041700931
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.017999693431193e-06,
"count": 1,
"self": 1.017999693431193e-06
},
"TrainerController._save_models": {
"total": 0.11884412200015504,
"count": 1,
"self": 0.0018088360002366244,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11703528599991841,
"count": 1,
"self": 0.11703528599991841
}
}
}
}
}
}
}