{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4018255472183228,
"min": 1.4018206596374512,
"max": 1.423440933227539,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69998.7578125,
"min": 67842.6015625,
"max": 78165.234375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 111.40224719101124,
"min": 88.2482269503546,
"max": 399.6507936507937,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49574.0,
"min": 48920.0,
"max": 50356.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999374.0,
"min": 49849.0,
"max": 1999374.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999374.0,
"min": 49849.0,
"max": 1999374.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.38832950592041,
"min": 0.04394489526748657,
"max": 2.4404430389404297,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1062.806640625,
"min": 5.493112087249756,
"max": 1317.5692138671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6093998891583987,
"min": 1.9397121720314026,
"max": 3.916898409804266,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1606.1829506754875,
"min": 242.46402150392532,
"max": 2109.875486910343,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6093998891583987,
"min": 1.9397121720314026,
"max": 3.916898409804266,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1606.1829506754875,
"min": 242.46402150392532,
"max": 2109.875486910343,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01839908395631937,
"min": 0.01380623652948998,
"max": 0.021530444244854154,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.055197251868958117,
"min": 0.02761247305897996,
"max": 0.055197251868958117,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04526344012055133,
"min": 0.022305792011320588,
"max": 0.05602799660215775,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.13579032036165398,
"min": 0.044611584022641176,
"max": 0.1651956643909216,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.2192489269499913e-06,
"min": 3.2192489269499913e-06,
"max": 0.00029533237655587495,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.657746780849974e-06,
"min": 9.657746780849974e-06,
"max": 0.0008437686187438003,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10107305000000004,
"min": 0.10107305000000004,
"max": 0.198444125,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3032191500000001,
"min": 0.20730540000000003,
"max": 0.5812562,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.354519499999987e-05,
"min": 6.354519499999987e-05,
"max": 0.0049223618375,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019063558499999962,
"min": 0.00019063558499999962,
"max": 0.01406468438,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671098571",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671100932"
},
"total": 2361.2960041750002,
"count": 1,
"self": 0.39185868599997775,
"children": {
"run_training.setup": {
"total": 0.17483608499998127,
"count": 1,
"self": 0.17483608499998127
},
"TrainerController.start_learning": {
"total": 2360.729309404,
"count": 1,
"self": 4.380098631957026,
"children": {
"TrainerController._reset_env": {
"total": 11.522549939999976,
"count": 1,
"self": 11.522549939999976
},
"TrainerController.advance": {
"total": 2344.698408884043,
"count": 231518,
"self": 4.472828382966782,
"children": {
"env_step": {
"total": 1862.922337191108,
"count": 231518,
"self": 1564.8239015051554,
"children": {
"SubprocessEnvManager._take_step": {
"total": 295.2166908129772,
"count": 231518,
"self": 15.526917245910624,
"children": {
"TorchPolicy.evaluate": {
"total": 279.6897735670666,
"count": 222937,
"self": 69.95726150205104,
"children": {
"TorchPolicy.sample_actions": {
"total": 209.73251206501556,
"count": 222937,
"self": 209.73251206501556
}
}
}
}
},
"workers": {
"total": 2.8817448729753323,
"count": 231518,
"self": 0.0,
"children": {
"worker_root": {
"total": 2351.83363495203,
"count": 231518,
"is_parallel": true,
"self": 1068.671520894094,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023631849999787846,
"count": 1,
"is_parallel": true,
"self": 0.0003763880000065001,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019867969999722845,
"count": 2,
"is_parallel": true,
"self": 0.0019867969999722845
}
}
},
"UnityEnvironment.step": {
"total": 0.02871889900001179,
"count": 1,
"is_parallel": true,
"self": 0.00028612700003805003,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020416099999920334,
"count": 1,
"is_parallel": true,
"self": 0.00020416099999920334
},
"communicator.exchange": {
"total": 0.02749356699996497,
"count": 1,
"is_parallel": true,
"self": 0.02749356699996497
},
"steps_from_proto": {
"total": 0.0007350440000095659,
"count": 1,
"is_parallel": true,
"self": 0.0002699739999911799,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046507000001838605,
"count": 2,
"is_parallel": true,
"self": 0.00046507000001838605
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1283.162114057936,
"count": 231517,
"is_parallel": true,
"self": 36.46153793286658,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.68483486104526,
"count": 231517,
"is_parallel": true,
"self": 80.68483486104526
},
"communicator.exchange": {
"total": 1065.9868692130162,
"count": 231517,
"is_parallel": true,
"self": 1065.9868692130162
},
"steps_from_proto": {
"total": 100.02887205100802,
"count": 231517,
"is_parallel": true,
"self": 43.06320996593229,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.965662085075735,
"count": 463034,
"is_parallel": true,
"self": 56.965662085075735
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 477.30324330996814,
"count": 231518,
"self": 6.932602630946747,
"children": {
"process_trajectory": {
"total": 156.33901913302316,
"count": 231518,
"self": 155.16070997202348,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1783091609996745,
"count": 10,
"self": 1.1783091609996745
}
}
},
"_update_policy": {
"total": 314.03162154599823,
"count": 97,
"self": 259.6131683150029,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.41845323099534,
"count": 2910,
"self": 54.41845323099534
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.049999789567664e-07,
"count": 1,
"self": 9.049999789567664e-07
},
"TrainerController._save_models": {
"total": 0.12825104300009116,
"count": 1,
"self": 0.002836564000517683,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12541447899957348,
"count": 1,
"self": 0.12541447899957348
}
}
}
}
}
}
}