{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3999911546707153,
"min": 1.3999911546707153,
"max": 1.4254693984985352,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70832.5546875,
"min": 68701.953125,
"max": 77428.03125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 68.01936376210236,
"min": 66.20295698924731,
"max": 411.24590163934425,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49178.0,
"min": 49178.0,
"max": 50172.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999961.0,
"min": 49983.0,
"max": 1999961.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999961.0,
"min": 49983.0,
"max": 1999961.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5342283248901367,
"min": 0.055319979786872864,
"max": 2.5440847873687744,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1832.2470703125,
"min": 6.6937174797058105,
"max": 1836.94775390625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9295000326748872,
"min": 1.7680493077217054,
"max": 4.044752422152132,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2841.0285236239433,
"min": 213.93396623432636,
"max": 2865.4711268544197,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9295000326748872,
"min": 1.7680493077217054,
"max": 4.044752422152132,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2841.0285236239433,
"min": 213.93396623432636,
"max": 2865.4711268544197,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01686525253866825,
"min": 0.013672868999795659,
"max": 0.02083468255586922,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05059575761600475,
"min": 0.027345737999591318,
"max": 0.05869958514231257,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.061821760899490784,
"min": 0.023014491299788158,
"max": 0.07025906164199114,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18546528269847234,
"min": 0.046028982599576315,
"max": 0.18746341144045192,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.859348713583336e-06,
"min": 3.859348713583336e-06,
"max": 0.000295362976545675,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1578046140750009e-05,
"min": 1.1578046140750009e-05,
"max": 0.00084402286865905,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10128641666666667,
"min": 0.10128641666666667,
"max": 0.19845432500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30385925,
"min": 0.20770125,
"max": 0.58134095,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.419219166666672e-05,
"min": 7.419219166666672e-05,
"max": 0.004922870817499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022257657500000016,
"min": 0.00022257657500000016,
"max": 0.014068913405000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671091054",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671093194"
},
"total": 2140.3331431879997,
"count": 1,
"self": 0.39033066399952077,
"children": {
"run_training.setup": {
"total": 0.11495345000002999,
"count": 1,
"self": 0.11495345000002999
},
"TrainerController.start_learning": {
"total": 2139.827859074,
"count": 1,
"self": 3.717056053977103,
"children": {
"TrainerController._reset_env": {
"total": 10.030463542999996,
"count": 1,
"self": 10.030463542999996
},
"TrainerController.advance": {
"total": 2125.967298165023,
"count": 233930,
"self": 3.7981505729785567,
"children": {
"env_step": {
"total": 1665.5754644479446,
"count": 233930,
"self": 1400.0357212478616,
"children": {
"SubprocessEnvManager._take_step": {
"total": 263.0007545870594,
"count": 233930,
"self": 14.140335303931806,
"children": {
"TorchPolicy.evaluate": {
"total": 248.8604192831276,
"count": 222967,
"self": 62.98576209732482,
"children": {
"TorchPolicy.sample_actions": {
"total": 185.8746571858028,
"count": 222967,
"self": 185.8746571858028
}
}
}
}
},
"workers": {
"total": 2.538988613023548,
"count": 233930,
"self": 0.0,
"children": {
"worker_root": {
"total": 2132.433781765019,
"count": 233930,
"is_parallel": true,
"self": 980.3343539520531,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0024006180001379107,
"count": 1,
"is_parallel": true,
"self": 0.0004253340000559547,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001975284000081956,
"count": 2,
"is_parallel": true,
"self": 0.001975284000081956
}
}
},
"UnityEnvironment.step": {
"total": 0.026880347000087568,
"count": 1,
"is_parallel": true,
"self": 0.0002782000003662688,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021868699991500762,
"count": 1,
"is_parallel": true,
"self": 0.00021868699991500762
},
"communicator.exchange": {
"total": 0.025694968999914636,
"count": 1,
"is_parallel": true,
"self": 0.025694968999914636
},
"steps_from_proto": {
"total": 0.0006884909998916555,
"count": 1,
"is_parallel": true,
"self": 0.000234519999821714,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004539710000699415,
"count": 2,
"is_parallel": true,
"self": 0.0004539710000699415
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1152.0994278129658,
"count": 233929,
"is_parallel": true,
"self": 33.634095385099045,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.61055896392463,
"count": 233929,
"is_parallel": true,
"self": 75.61055896392463
},
"communicator.exchange": {
"total": 951.6960263040248,
"count": 233929,
"is_parallel": true,
"self": 951.6960263040248
},
"steps_from_proto": {
"total": 91.15874715991731,
"count": 233929,
"is_parallel": true,
"self": 37.42035431196177,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.73839284795554,
"count": 467858,
"is_parallel": true,
"self": 53.73839284795554
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 456.5936831440995,
"count": 233930,
"self": 5.657198053096863,
"children": {
"process_trajectory": {
"total": 146.46940265800094,
"count": 233930,
"self": 146.0141291470013,
"children": {
"RLTrainer._checkpoint": {
"total": 0.455273510999632,
"count": 4,
"self": 0.455273510999632
}
}
},
"_update_policy": {
"total": 304.4670824330017,
"count": 97,
"self": 252.17035869798474,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.29672373501694,
"count": 2910,
"self": 52.29672373501694
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.729998626222368e-07,
"count": 1,
"self": 8.729998626222368e-07
},
"TrainerController._save_models": {
"total": 0.11304043900008764,
"count": 1,
"self": 0.002006434000122681,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11103400499996496,
"count": 1,
"self": 0.11103400499996496
}
}
}
}
}
}
}