{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4085955619812012,
"min": 1.4085955619812012,
"max": 1.42913818359375,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70949.546875,
"min": 68507.6953125,
"max": 76356.0859375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.28776978417267,
"min": 80.57096247960848,
"max": 400.072,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49088.0,
"min": 49071.0,
"max": 50009.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999954.0,
"min": 49894.0,
"max": 1999954.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999954.0,
"min": 49894.0,
"max": 1999954.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.472074270248413,
"min": 0.15924637019634247,
"max": 2.477937698364258,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1374.4732666015625,
"min": 19.746549606323242,
"max": 1491.5274658203125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.850363896261874,
"min": 1.826816688141515,
"max": 3.965327724922134,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2140.802326321602,
"min": 226.52526932954788,
"max": 2316.9926323890686,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.850363896261874,
"min": 1.826816688141515,
"max": 3.965327724922134,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2140.802326321602,
"min": 226.52526932954788,
"max": 2316.9926323890686,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01668566000039896,
"min": 0.014109828296932391,
"max": 0.021327105734477905,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.050056980001196884,
"min": 0.028219656593864783,
"max": 0.05703028967691353,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05502370351718532,
"min": 0.02096851859241724,
"max": 0.06496741610268753,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16507111055155596,
"min": 0.04193703718483448,
"max": 0.18499911949038506,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.35009888333333e-06,
"min": 3.35009888333333e-06,
"max": 0.00029532225155924993,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.005029664999999e-05,
"min": 1.005029664999999e-05,
"max": 0.0008439214686928498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10111666666666667,
"min": 0.10111666666666667,
"max": 0.19844075000000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30335,
"min": 0.20737894999999998,
"max": 0.58130715,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.572166666666663e-05,
"min": 6.572166666666663e-05,
"max": 0.004922193425000002,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019716499999999988,
"min": 0.00019716499999999988,
"max": 0.014067226784999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670875646",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670877805"
},
"total": 2158.486982511,
"count": 1,
"self": 0.3905881360001331,
"children": {
"run_training.setup": {
"total": 0.10623093899999958,
"count": 1,
"self": 0.10623093899999958
},
"TrainerController.start_learning": {
"total": 2157.990163436,
"count": 1,
"self": 3.6982750880447384,
"children": {
"TrainerController._reset_env": {
"total": 10.312296010000011,
"count": 1,
"self": 10.312296010000011
},
"TrainerController.advance": {
"total": 2143.868187329955,
"count": 232782,
"self": 3.9483768620470983,
"children": {
"env_step": {
"total": 1680.535987599947,
"count": 232782,
"self": 1408.6570077899983,
"children": {
"SubprocessEnvManager._take_step": {
"total": 269.3902214990408,
"count": 232782,
"self": 13.86986571199634,
"children": {
"TorchPolicy.evaluate": {
"total": 255.52035578704448,
"count": 223014,
"self": 64.2745947640891,
"children": {
"TorchPolicy.sample_actions": {
"total": 191.24576102295538,
"count": 223014,
"self": 191.24576102295538
}
}
}
}
},
"workers": {
"total": 2.4887583109078264,
"count": 232782,
"self": 0.0,
"children": {
"worker_root": {
"total": 2150.5369853940574,
"count": 232782,
"is_parallel": true,
"self": 991.8166286050841,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001952205000009144,
"count": 1,
"is_parallel": true,
"self": 0.00032428899999104033,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016279160000181037,
"count": 2,
"is_parallel": true,
"self": 0.0016279160000181037
}
}
},
"UnityEnvironment.step": {
"total": 0.03004080900001327,
"count": 1,
"is_parallel": true,
"self": 0.00026795799999490555,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018040499998051018,
"count": 1,
"is_parallel": true,
"self": 0.00018040499998051018
},
"communicator.exchange": {
"total": 0.028883955000026162,
"count": 1,
"is_parallel": true,
"self": 0.028883955000026162
},
"steps_from_proto": {
"total": 0.0007084910000116906,
"count": 1,
"is_parallel": true,
"self": 0.0002447319999987485,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004637590000129421,
"count": 2,
"is_parallel": true,
"self": 0.0004637590000129421
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1158.7203567889733,
"count": 232781,
"is_parallel": true,
"self": 34.40572150272669,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.22156169906322,
"count": 232781,
"is_parallel": true,
"self": 75.22156169906322
},
"communicator.exchange": {
"total": 958.0187422020999,
"count": 232781,
"is_parallel": true,
"self": 958.0187422020999
},
"steps_from_proto": {
"total": 91.0743313850835,
"count": 232781,
"is_parallel": true,
"self": 37.520075818196744,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.55425556688675,
"count": 465562,
"is_parallel": true,
"self": 53.55425556688675
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 459.383822867961,
"count": 232782,
"self": 5.842758490982931,
"children": {
"process_trajectory": {
"total": 145.01140295497714,
"count": 232782,
"self": 144.54100463297755,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4703983219995962,
"count": 4,
"self": 0.4703983219995962
}
}
},
"_update_policy": {
"total": 308.5296614220009,
"count": 97,
"self": 255.9774852470012,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.552176174999715,
"count": 2910,
"self": 52.552176174999715
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.600003068044316e-07,
"count": 1,
"self": 9.600003068044316e-07
},
"TrainerController._save_models": {
"total": 0.11140404799971293,
"count": 1,
"self": 0.001935944999786443,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10946810299992649,
"count": 1,
"self": 0.10946810299992649
}
}
}
}
}
}
}