{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.397064447402954,
"min": 1.397064447402954,
"max": 1.4263479709625244,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68654.5390625,
"min": 68514.609375,
"max": 76033.484375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.13253012048193,
"min": 78.74363057324841,
"max": 406.1788617886179,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49462.0,
"min": 48624.0,
"max": 50137.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999938.0,
"min": 49334.0,
"max": 1999938.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999938.0,
"min": 49334.0,
"max": 1999938.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.491286039352417,
"min": 0.13656732439994812,
"max": 2.491286039352417,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1447.4371337890625,
"min": 16.661212921142578,
"max": 1509.7960205078125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9212696842605603,
"min": 1.8191229725470308,
"max": 4.006738985372969,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2278.2576865553856,
"min": 221.93300265073776,
"max": 2424.077086150646,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9212696842605603,
"min": 1.8191229725470308,
"max": 4.006738985372969,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2278.2576865553856,
"min": 221.93300265073776,
"max": 2424.077086150646,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017892672631913024,
"min": 0.012788815704576944,
"max": 0.02078189190942794,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.053678017895739075,
"min": 0.025577631409153888,
"max": 0.0540398203273071,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05762129678494401,
"min": 0.020709910957763592,
"max": 0.06391094668457906,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17286389035483202,
"min": 0.041419821915527184,
"max": 0.17286389035483202,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4207988597666653e-06,
"min": 3.4207988597666653e-06,
"max": 0.000295325626558125,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0262396579299996e-05,
"min": 1.0262396579299996e-05,
"max": 0.0008438569687143501,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10114023333333333,
"min": 0.10114023333333333,
"max": 0.198441875,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3034207,
"min": 0.20740660000000005,
"max": 0.5812856500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.689764333333332e-05,
"min": 6.689764333333332e-05,
"max": 0.0049222495625,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020069292999999998,
"min": 0.00020069292999999998,
"max": 0.014066153934999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688018181",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688020773"
},
"total": 2591.569229038,
"count": 1,
"self": 0.4385996159999195,
"children": {
"run_training.setup": {
"total": 0.04350901899999826,
"count": 1,
"self": 0.04350901899999826
},
"TrainerController.start_learning": {
"total": 2591.087120403,
"count": 1,
"self": 4.676937588916644,
"children": {
"TrainerController._reset_env": {
"total": 4.067804249999995,
"count": 1,
"self": 4.067804249999995
},
"TrainerController.advance": {
"total": 2582.217949512083,
"count": 232409,
"self": 4.945376461151227,
"children": {
"env_step": {
"total": 2024.439404770004,
"count": 232409,
"self": 1706.9350448492169,
"children": {
"SubprocessEnvManager._take_step": {
"total": 314.4178309199108,
"count": 232409,
"self": 18.29915195600404,
"children": {
"TorchPolicy.evaluate": {
"total": 296.11867896390675,
"count": 222870,
"self": 296.11867896390675
}
}
},
"workers": {
"total": 3.08652900087651,
"count": 232409,
"self": 0.0,
"children": {
"worker_root": {
"total": 2582.9498827590237,
"count": 232409,
"is_parallel": true,
"self": 1190.1576891891032,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009899570000015956,
"count": 1,
"is_parallel": true,
"self": 0.00026492699998925673,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007250300000123389,
"count": 2,
"is_parallel": true,
"self": 0.0007250300000123389
}
}
},
"UnityEnvironment.step": {
"total": 0.03026945499999556,
"count": 1,
"is_parallel": true,
"self": 0.000397539999937635,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023165800001834214,
"count": 1,
"is_parallel": true,
"self": 0.00023165800001834214
},
"communicator.exchange": {
"total": 0.02879794000000402,
"count": 1,
"is_parallel": true,
"self": 0.02879794000000402
},
"steps_from_proto": {
"total": 0.000842317000035564,
"count": 1,
"is_parallel": true,
"self": 0.0002466029999936836,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005957140000418804,
"count": 2,
"is_parallel": true,
"self": 0.0005957140000418804
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1392.7921935699205,
"count": 232408,
"is_parallel": true,
"self": 41.203906251985245,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 86.28974908491733,
"count": 232408,
"is_parallel": true,
"self": 86.28974908491733
},
"communicator.exchange": {
"total": 1163.3011346070743,
"count": 232408,
"is_parallel": true,
"self": 1163.3011346070743
},
"steps_from_proto": {
"total": 101.99740362594378,
"count": 232408,
"is_parallel": true,
"self": 38.293192966008576,
"children": {
"_process_rank_one_or_two_observation": {
"total": 63.7042106599352,
"count": 464816,
"is_parallel": true,
"self": 63.7042106599352
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 552.833168280928,
"count": 232409,
"self": 7.0195988038929045,
"children": {
"process_trajectory": {
"total": 150.34803216803493,
"count": 232409,
"self": 148.73914528203545,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6088868859994818,
"count": 10,
"self": 1.6088868859994818
}
}
},
"_update_policy": {
"total": 395.4655373090002,
"count": 97,
"self": 335.069453045002,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.396084263998205,
"count": 2910,
"self": 60.396084263998205
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.960003808373585e-07,
"count": 1,
"self": 9.960003808373585e-07
},
"TrainerController._save_models": {
"total": 0.12442805599994244,
"count": 1,
"self": 0.0018444229995111527,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12258363300043129,
"count": 1,
"self": 0.12258363300043129
}
}
}
}
}
}
}