{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4036422967910767,
"min": 1.4036422967910767,
"max": 1.42820405960083,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70257.9140625,
"min": 68580.9375,
"max": 77282.1796875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 109.05054945054945,
"min": 90.0327868852459,
"max": 396.1968503937008,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49618.0,
"min": 49034.0,
"max": 50317.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999993.0,
"min": 49869.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999993.0,
"min": 49869.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3263869285583496,
"min": 0.04442630335688591,
"max": 2.4412710666656494,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1058.506103515625,
"min": 5.597714424133301,
"max": 1320.7276611328125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.4700588932404153,
"min": 1.7282887296307654,
"max": 3.890988128683263,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1578.8767964243889,
"min": 217.76437993347645,
"max": 2105.0245776176453,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.4700588932404153,
"min": 1.7282887296307654,
"max": 3.890988128683263,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1578.8767964243889,
"min": 217.76437993347645,
"max": 2105.0245776176453,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018316355144876676,
"min": 0.013413005329978963,
"max": 0.02010318313453657,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03663271028975335,
"min": 0.026826010659957926,
"max": 0.05552411513441863,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.048325108860929805,
"min": 0.02217448794593414,
"max": 0.05769812017679214,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09665021772185961,
"min": 0.04436810780316591,
"max": 0.15835581036905447,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.472948509050005e-06,
"min": 4.472948509050005e-06,
"max": 0.000295337776554075,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.94589701810001e-06,
"min": 8.94589701810001e-06,
"max": 0.0008439025686991498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10149095,
"min": 0.10149095,
"max": 0.19844592500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.2029819,
"min": 0.2029819,
"max": 0.58130085,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.439840500000013e-05,
"min": 8.439840500000013e-05,
"max": 0.0049224516574999985,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016879681000000026,
"min": 0.00016879681000000026,
"max": 0.014066912414999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686586763",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686589138"
},
"total": 2375.653923483,
"count": 1,
"self": 0.6367977189997873,
"children": {
"run_training.setup": {
"total": 0.03777622100000144,
"count": 1,
"self": 0.03777622100000144
},
"TrainerController.start_learning": {
"total": 2374.9793495430004,
"count": 1,
"self": 4.4428626840253855,
"children": {
"TrainerController._reset_env": {
"total": 3.813984005000009,
"count": 1,
"self": 3.813984005000009
},
"TrainerController.advance": {
"total": 2366.546139611975,
"count": 231778,
"self": 4.6609828020532404,
"children": {
"env_step": {
"total": 1849.7502896259723,
"count": 231778,
"self": 1559.1067946049066,
"children": {
"SubprocessEnvManager._take_step": {
"total": 287.8569196980523,
"count": 231778,
"self": 16.859787333993438,
"children": {
"TorchPolicy.evaluate": {
"total": 270.99713236405887,
"count": 222990,
"self": 270.99713236405887
}
}
},
"workers": {
"total": 2.7865753230134374,
"count": 231778,
"self": 0.0,
"children": {
"worker_root": {
"total": 2366.9973489970876,
"count": 231778,
"is_parallel": true,
"self": 1092.7069544271362,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007830089999742995,
"count": 1,
"is_parallel": true,
"self": 0.00021949999995740654,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000563509000016893,
"count": 2,
"is_parallel": true,
"self": 0.000563509000016893
}
}
},
"UnityEnvironment.step": {
"total": 0.028359798999986197,
"count": 1,
"is_parallel": true,
"self": 0.00029439700000466473,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019970200003172067,
"count": 1,
"is_parallel": true,
"self": 0.00019970200003172067
},
"communicator.exchange": {
"total": 0.027122037999959048,
"count": 1,
"is_parallel": true,
"self": 0.027122037999959048
},
"steps_from_proto": {
"total": 0.0007436619999907634,
"count": 1,
"is_parallel": true,
"self": 0.00019258300000046802,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005510789999902954,
"count": 2,
"is_parallel": true,
"self": 0.0005510789999902954
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1274.2903945699513,
"count": 231777,
"is_parallel": true,
"self": 38.91909350279093,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.19092295101058,
"count": 231777,
"is_parallel": true,
"self": 78.19092295101058
},
"communicator.exchange": {
"total": 1062.3355358340827,
"count": 231777,
"is_parallel": true,
"self": 1062.3355358340827
},
"steps_from_proto": {
"total": 94.84484228206713,
"count": 231777,
"is_parallel": true,
"self": 33.54854710909461,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.29629517297252,
"count": 463554,
"is_parallel": true,
"self": 61.29629517297252
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 512.1348671839492,
"count": 231778,
"self": 6.755049143949577,
"children": {
"process_trajectory": {
"total": 130.35217865700048,
"count": 231778,
"self": 128.9698429080011,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3823357489993668,
"count": 10,
"self": 1.3823357489993668
}
}
},
"_update_policy": {
"total": 375.0276393829991,
"count": 96,
"self": 315.84879079301237,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.17884858998673,
"count": 2880,
"self": 59.17884858998673
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1669999366858974e-06,
"count": 1,
"self": 1.1669999366858974e-06
},
"TrainerController._save_models": {
"total": 0.17636207500027012,
"count": 1,
"self": 0.004393456000343576,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17196861899992655,
"count": 1,
"self": 0.17196861899992655
}
}
}
}
}
}
}