{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4029535055160522,
"min": 1.4029535055160522,
"max": 1.4244345426559448,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69343.78125,
"min": 69343.78125,
"max": 76363.0546875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 93.36037735849057,
"min": 80.7825370675453,
"max": 413.3688524590164,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49481.0,
"min": 48852.0,
"max": 50431.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999948.0,
"min": 49882.0,
"max": 1999948.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999948.0,
"min": 49882.0,
"max": 1999948.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4274144172668457,
"min": 0.03356517106294632,
"max": 2.527374029159546,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1286.5296630859375,
"min": 4.061385631561279,
"max": 1473.9462890625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7261958983709227,
"min": 1.8045224079415818,
"max": 3.9996498343439733,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1974.883826136589,
"min": 218.3472113609314,
"max": 2293.3822872638702,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7261958983709227,
"min": 1.8045224079415818,
"max": 3.9996498343439733,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1974.883826136589,
"min": 218.3472113609314,
"max": 2293.3822872638702,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017787929915034004,
"min": 0.0120234203564299,
"max": 0.019568497585714793,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05336378974510202,
"min": 0.0240468407128598,
"max": 0.055746733442235075,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.054995195319255195,
"min": 0.022320151887834074,
"max": 0.0617127584086524,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16498558595776558,
"min": 0.04464030377566815,
"max": 0.18513827522595722,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.676198774633326e-06,
"min": 3.676198774633326e-06,
"max": 0.000295364476545175,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1028596323899979e-05,
"min": 1.1028596323899979e-05,
"max": 0.00084407926864025,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10122536666666669,
"min": 0.10122536666666669,
"max": 0.19845482500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30367610000000006,
"min": 0.20758130000000002,
"max": 0.5813597500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.114579666666655e-05,
"min": 7.114579666666655e-05,
"max": 0.004922895767499998,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021343738999999966,
"min": 0.00021343738999999966,
"max": 0.014069851525000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671097831",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671100041"
},
"total": 2210.619342388,
"count": 1,
"self": 0.44367952299990066,
"children": {
"run_training.setup": {
"total": 0.1126852099999951,
"count": 1,
"self": 0.1126852099999951
},
"TrainerController.start_learning": {
"total": 2210.062977655,
"count": 1,
"self": 3.8411276629399254,
"children": {
"TrainerController._reset_env": {
"total": 11.415210807999983,
"count": 1,
"self": 11.415210807999983
},
"TrainerController.advance": {
"total": 2194.6832953900603,
"count": 232403,
"self": 3.9324716310493386,
"children": {
"env_step": {
"total": 1718.2728165300189,
"count": 232403,
"self": 1446.468500942048,
"children": {
"SubprocessEnvManager._take_step": {
"total": 269.3179514289263,
"count": 232403,
"self": 13.873063104919709,
"children": {
"TorchPolicy.evaluate": {
"total": 255.44488832400657,
"count": 222992,
"self": 64.59893484501197,
"children": {
"TorchPolicy.sample_actions": {
"total": 190.8459534789946,
"count": 222992,
"self": 190.8459534789946
}
}
}
}
},
"workers": {
"total": 2.486364159044683,
"count": 232403,
"self": 0.0,
"children": {
"worker_root": {
"total": 2202.5106388039976,
"count": 232403,
"is_parallel": true,
"self": 1009.2064854079888,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023580869999477727,
"count": 1,
"is_parallel": true,
"self": 0.00031064899997090833,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020474379999768644,
"count": 2,
"is_parallel": true,
"self": 0.0020474379999768644
}
}
},
"UnityEnvironment.step": {
"total": 0.026438904000031016,
"count": 1,
"is_parallel": true,
"self": 0.00029284800007189915,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001944839999623582,
"count": 1,
"is_parallel": true,
"self": 0.0001944839999623582
},
"communicator.exchange": {
"total": 0.025272449999988567,
"count": 1,
"is_parallel": true,
"self": 0.025272449999988567
},
"steps_from_proto": {
"total": 0.0006791220000081921,
"count": 1,
"is_parallel": true,
"self": 0.00023304100000132166,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00044608100000687045,
"count": 2,
"is_parallel": true,
"self": 0.00044608100000687045
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1193.3041533960088,
"count": 232402,
"is_parallel": true,
"self": 34.57051162788889,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.7155330470419,
"count": 232402,
"is_parallel": true,
"self": 74.7155330470419
},
"communicator.exchange": {
"total": 992.5537684480516,
"count": 232402,
"is_parallel": true,
"self": 992.5537684480516
},
"steps_from_proto": {
"total": 91.46434027302632,
"count": 232402,
"is_parallel": true,
"self": 37.44810493105081,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.016235341975516,
"count": 464804,
"is_parallel": true,
"self": 54.016235341975516
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 472.4780072289922,
"count": 232403,
"self": 5.964201429904563,
"children": {
"process_trajectory": {
"total": 147.69708570308796,
"count": 232403,
"self": 146.515306323088,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1817793799999663,
"count": 10,
"self": 1.1817793799999663
}
}
},
"_update_policy": {
"total": 318.8167200959997,
"count": 97,
"self": 265.30878283399784,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.50793726200186,
"count": 2910,
"self": 53.50793726200186
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3050002962700091e-06,
"count": 1,
"self": 1.3050002962700091e-06
},
"TrainerController._save_models": {
"total": 0.1233424889996968,
"count": 1,
"self": 0.0025410429993826256,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12080144600031417,
"count": 1,
"self": 0.12080144600031417
}
}
}
}
}
}
}