{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4121851921081543,
"min": 1.4121851921081543,
"max": 1.4318958520889282,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70222.3203125,
"min": 68515.203125,
"max": 77494.0703125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 82.59432387312187,
"min": 77.28684627575278,
"max": 395.6062992125984,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49474.0,
"min": 48768.0,
"max": 50268.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999394.0,
"min": 49964.0,
"max": 1999394.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999394.0,
"min": 49964.0,
"max": 1999394.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.411043882369995,
"min": 0.06464269757270813,
"max": 2.477888345718384,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1444.21533203125,
"min": 8.144979476928711,
"max": 1575.585693359375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7773065561046186,
"min": 1.7413561703666809,
"max": 4.04892999576035,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2262.6066271066666,
"min": 219.41087746620178,
"max": 2485.322323858738,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7773065561046186,
"min": 1.7413561703666809,
"max": 4.04892999576035,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2262.6066271066666,
"min": 219.41087746620178,
"max": 2485.322323858738,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017268177536768944,
"min": 0.012979278210291845,
"max": 0.019817366094018022,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05180453261030683,
"min": 0.029242635642488798,
"max": 0.05634538390246841,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06035505843659242,
"min": 0.022081264574080706,
"max": 0.07740602089713017,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18106517530977725,
"min": 0.04416252914816141,
"max": 0.18403593425949416,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6694987768666676e-06,
"min": 3.6694987768666676e-06,
"max": 0.0002953091265636249,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1008496330600002e-05,
"min": 1.1008496330600002e-05,
"max": 0.0008438691187102997,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10122313333333331,
"min": 0.10122313333333331,
"max": 0.198436375,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3036693999999999,
"min": 0.20758075,
"max": 0.5812897,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.103435333333335e-05,
"min": 7.103435333333335e-05,
"max": 0.004921975112500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021310306000000005,
"min": 0.00021310306000000005,
"max": 0.014066356030000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678177699",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678180017"
},
"total": 2317.77214,
"count": 1,
"self": 0.7333717400006208,
"children": {
"run_training.setup": {
"total": 0.1863917709999896,
"count": 1,
"self": 0.1863917709999896
},
"TrainerController.start_learning": {
"total": 2316.8523764889997,
"count": 1,
"self": 4.122406416893227,
"children": {
"TrainerController._reset_env": {
"total": 9.289795688000027,
"count": 1,
"self": 9.289795688000027
},
"TrainerController.advance": {
"total": 2303.2766454981065,
"count": 232644,
"self": 4.416856290140913,
"children": {
"env_step": {
"total": 1792.938038053955,
"count": 232644,
"self": 1496.434349369923,
"children": {
"SubprocessEnvManager._take_step": {
"total": 293.81059027500146,
"count": 232644,
"self": 15.36654579301495,
"children": {
"TorchPolicy.evaluate": {
"total": 278.4440444819865,
"count": 222958,
"self": 69.98674486101811,
"children": {
"TorchPolicy.sample_actions": {
"total": 208.4572996209684,
"count": 222958,
"self": 208.4572996209684
}
}
}
}
},
"workers": {
"total": 2.69309840903054,
"count": 232644,
"self": 0.0,
"children": {
"worker_root": {
"total": 2308.642697594002,
"count": 232644,
"is_parallel": true,
"self": 1097.0252181349429,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011832160000153635,
"count": 1,
"is_parallel": true,
"self": 0.00046528300003956247,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007179329999758011,
"count": 2,
"is_parallel": true,
"self": 0.0007179329999758011
}
}
},
"UnityEnvironment.step": {
"total": 0.02851040100000546,
"count": 1,
"is_parallel": true,
"self": 0.00028891900001326576,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019684599999436614,
"count": 1,
"is_parallel": true,
"self": 0.00019684599999436614
},
"communicator.exchange": {
"total": 0.02734279499998138,
"count": 1,
"is_parallel": true,
"self": 0.02734279499998138
},
"steps_from_proto": {
"total": 0.0006818410000164477,
"count": 1,
"is_parallel": true,
"self": 0.0002319640000223444,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00044987699999410324,
"count": 2,
"is_parallel": true,
"self": 0.00044987699999410324
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1211.6174794590593,
"count": 232643,
"is_parallel": true,
"self": 37.717154770089564,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.07757603202498,
"count": 232643,
"is_parallel": true,
"self": 77.07757603202498
},
"communicator.exchange": {
"total": 1008.2097837089375,
"count": 232643,
"is_parallel": true,
"self": 1008.2097837089375
},
"steps_from_proto": {
"total": 88.61296494800723,
"count": 232643,
"is_parallel": true,
"self": 35.749214418219196,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.863750529788035,
"count": 465286,
"is_parallel": true,
"self": 52.863750529788035
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 505.9217511540104,
"count": 232644,
"self": 6.193350346009083,
"children": {
"process_trajectory": {
"total": 160.9435003110009,
"count": 232644,
"self": 159.57814674400072,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3653535670001702,
"count": 10,
"self": 1.3653535670001702
}
}
},
"_update_policy": {
"total": 338.7849004970004,
"count": 97,
"self": 282.29009545100564,
"children": {
"TorchPPOOptimizer.update": {
"total": 56.49480504599478,
"count": 2910,
"self": 56.49480504599478
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2579998838191386e-06,
"count": 1,
"self": 1.2579998838191386e-06
},
"TrainerController._save_models": {
"total": 0.1635276279998834,
"count": 1,
"self": 0.0026109410000572097,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1609166869998262,
"count": 1,
"self": 0.1609166869998262
}
}
}
}
}
}
}