ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4023464918136597,
"min": 1.4023464918136597,
"max": 1.4265486001968384,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70149.578125,
"min": 68694.1171875,
"max": 77739.1875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.54766031195841,
"min": 82.54362416107382,
"max": 401.04,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49361.0,
"min": 48927.0,
"max": 50273.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999906.0,
"min": 49900.0,
"max": 1999906.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999906.0,
"min": 49900.0,
"max": 1999906.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4622442722320557,
"min": 0.14654095470905304,
"max": 2.4866180419921875,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1420.7149658203125,
"min": 18.171077728271484,
"max": 1454.1258544921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7554354643904357,
"min": 1.8377630359703494,
"max": 3.9971567664502397,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2166.8862629532814,
"min": 227.88261646032333,
"max": 2302.362297475338,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7554354643904357,
"min": 1.8377630359703494,
"max": 3.9971567664502397,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2166.8862629532814,
"min": 227.88261646032333,
"max": 2302.362297475338,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016368018763346804,
"min": 0.013888875709380955,
"max": 0.020818655584783602,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.049104056290040415,
"min": 0.02777775141876191,
"max": 0.05499447993302056,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.060176877718832754,
"min": 0.021040726515154044,
"max": 0.06100808034340541,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18053063315649825,
"min": 0.04208145303030809,
"max": 0.18053063315649825,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.521648826149995e-06,
"min": 3.521648826149995e-06,
"max": 0.00029529780156739994,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0564946478449984e-05,
"min": 1.0564946478449984e-05,
"max": 0.0008438994187001999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10117385000000001,
"min": 0.10117385000000001,
"max": 0.19843260000000007,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30352155000000003,
"min": 0.20747230000000003,
"max": 0.5812997999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.857511499999993e-05,
"min": 6.857511499999993e-05,
"max": 0.00492178674,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002057253449999998,
"min": 0.0002057253449999998,
"max": 0.014066860020000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671559990",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671562431"
},
"total": 2440.391841494,
"count": 1,
"self": 0.3871526820003055,
"children": {
"run_training.setup": {
"total": 0.11377044300002126,
"count": 1,
"self": 0.11377044300002126
},
"TrainerController.start_learning": {
"total": 2439.890918369,
"count": 1,
"self": 4.5934625999789205,
"children": {
"TrainerController._reset_env": {
"total": 7.666941338000015,
"count": 1,
"self": 7.666941338000015
},
"TrainerController.advance": {
"total": 2427.511245669021,
"count": 232361,
"self": 4.582865300035337,
"children": {
"env_step": {
"total": 1936.0024692130182,
"count": 232361,
"self": 1622.7298261429862,
"children": {
"SubprocessEnvManager._take_step": {
"total": 310.2461833369846,
"count": 232361,
"self": 16.232683114973497,
"children": {
"TorchPolicy.evaluate": {
"total": 294.0135002220111,
"count": 222964,
"self": 73.77051965398891,
"children": {
"TorchPolicy.sample_actions": {
"total": 220.24298056802218,
"count": 222964,
"self": 220.24298056802218
}
}
}
}
},
"workers": {
"total": 3.0264597330474885,
"count": 232361,
"self": 0.0,
"children": {
"worker_root": {
"total": 2430.7275371779465,
"count": 232361,
"is_parallel": true,
"self": 1099.6070148200292,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0024151640000127372,
"count": 1,
"is_parallel": true,
"self": 0.0004845039999850087,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019306600000277285,
"count": 2,
"is_parallel": true,
"self": 0.0019306600000277285
}
}
},
"UnityEnvironment.step": {
"total": 0.03084127099998568,
"count": 1,
"is_parallel": true,
"self": 0.0003074670000273727,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020725799998899674,
"count": 1,
"is_parallel": true,
"self": 0.00020725799998899674
},
"communicator.exchange": {
"total": 0.029531954999981735,
"count": 1,
"is_parallel": true,
"self": 0.029531954999981735
},
"steps_from_proto": {
"total": 0.0007945909999875767,
"count": 1,
"is_parallel": true,
"self": 0.00027306699996643147,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005215240000211452,
"count": 2,
"is_parallel": true,
"self": 0.0005215240000211452
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1331.1205223579173,
"count": 232360,
"is_parallel": true,
"self": 37.65657343776297,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.80130040507072,
"count": 232360,
"is_parallel": true,
"self": 83.80130040507072
},
"communicator.exchange": {
"total": 1106.280310535007,
"count": 232360,
"is_parallel": true,
"self": 1106.280310535007
},
"steps_from_proto": {
"total": 103.38233798007673,
"count": 232360,
"is_parallel": true,
"self": 43.89619600324079,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.48614197683594,
"count": 464720,
"is_parallel": true,
"self": 59.48614197683594
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 486.92591115596724,
"count": 232361,
"self": 6.997449884990658,
"children": {
"process_trajectory": {
"total": 157.10485265297842,
"count": 232361,
"self": 155.91805222297847,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1868004299999484,
"count": 10,
"self": 1.1868004299999484
}
}
},
"_update_policy": {
"total": 322.82360861799816,
"count": 97,
"self": 268.7531007259945,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.07050789200366,
"count": 2910,
"self": 54.07050789200366
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0520002433622722e-06,
"count": 1,
"self": 1.0520002433622722e-06
},
"TrainerController._save_models": {
"total": 0.11926770999980363,
"count": 1,
"self": 0.0019217399999433837,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11734596999986024,
"count": 1,
"self": 0.11734596999986024
}
}
}
}
}
}
}
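
The JSON above has two parts: "gauges" (training statistics, each tracked as a latest value plus a running min/max over "count" reporting periods) and a nested timer tree in which every node records cumulative wall-clock seconds ("total"), a call count ("count"), and time spent in the node itself excluding its children ("self"). Below is a minimal Python sketch, not part of the log itself, showing one way to read the file back and summarize both parts; the relative path run_logs/timers.json is an assumption, so adjust it to wherever the file lives in your run directory.

import json

def print_gauges(gauges):
    # Each gauge entry holds the latest value plus a running min/max
    # accumulated over `count` reporting periods.
    for name, g in sorted(gauges.items()):
        print(f"{name}: value={g['value']:.4g} "
              f"(min={g['min']:.4g}, max={g['max']:.4g}, count={g['count']})")

def print_timer_tree(node, name="root", depth=0):
    # Each timer node records cumulative seconds ("total"), the number of
    # calls ("count"), and time excluding children ("self"); children are
    # keyed by name, so recurse to print the hierarchy with indentation.
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.2f}s "
          f"over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        print_timer_tree(child, child_name, depth + 1)

if __name__ == "__main__":
    with open("run_logs/timers.json") as f:  # assumed path; adjust as needed
        timers = json.load(f)
    print_gauges(timers["gauges"])
    # The root object itself carries total/count/children, so it can be
    # walked directly as the top of the timer tree.
    print_timer_tree(timers)

One reading note: subtrees flagged "is_parallel" (the worker_root branch here) are timed inside the environment worker processes rather than the main thread, which is presumably why worker_root's roughly 2430 s can exceed the ~3.0 s recorded on its parent "workers" node.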