{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.7909812927246094,
"min": 2.7909812927246094,
"max": 2.8735625743865967,
"count": 2
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 28797.34375,
"min": 28797.34375,
"max": 29428.154296875,
"count": 2
},
"SnowballTarget.Step.mean": {
"value": 19992.0,
"min": 9952.0,
"max": 19992.0,
"count": 2
},
"SnowballTarget.Step.sum": {
"value": 19992.0,
"min": 9952.0,
"max": 19992.0,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.2357654571533203,
"min": 0.4367538094520569,
"max": 1.2357654571533203,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 253.3319091796875,
"min": 84.73023986816406,
"max": 253.3319091796875,
"count": 2
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 2
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 2
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06977930348959989,
"min": 0.06887715078415105,
"max": 0.06977930348959989,
"count": 2
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.34889651744799943,
"min": 0.2755086031366042,
"max": 0.34889651744799943,
"count": 2
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17623081335834428,
"min": 0.10139811878074326,
"max": 0.17623081335834428,
"count": 2
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8811540667917215,
"min": 0.40559247512297303,
"max": 0.8811540667917215,
"count": 2
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.032007656e-05,
"min": 7.032007656e-05,
"max": 0.00021882002706000002,
"count": 2
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0003516003828,
"min": 0.0003516003828,
"max": 0.0008752801082400001,
"count": 2
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.12344000000000002,
"min": 0.12344000000000002,
"max": 0.17294,
"count": 2
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.6172000000000001,
"min": 0.6172000000000001,
"max": 0.69176,
"count": 2
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0011796560000000003,
"min": 0.0011796560000000003,
"max": 0.003649706,
"count": 2
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.005898280000000001,
"min": 0.005898280000000001,
"max": 0.014598824,
"count": 2
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 5.509090909090909,
"min": 3.0,
"max": 5.509090909090909,
"count": 2
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 303.0,
"min": 132.0,
"max": 303.0,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 5.509090909090909,
"min": 3.0,
"max": 5.509090909090909,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 303.0,
"min": 132.0,
"max": 303.0,
"count": 2
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682273964",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682274019"
},
"total": 54.578588362000005,
"count": 1,
"self": 0.6368006950000336,
"children": {
"run_training.setup": {
"total": 0.11350136299995484,
"count": 1,
"self": 0.11350136299995484
},
"TrainerController.start_learning": {
"total": 53.82828630400002,
"count": 1,
"self": 0.11207899099827046,
"children": {
"TrainerController._reset_env": {
"total": 3.820666545999984,
"count": 1,
"self": 3.820666545999984
},
"TrainerController.advance": {
"total": 49.636060193001754,
"count": 1870,
"self": 0.03294616600123845,
"children": {
"env_step": {
"total": 49.603114027000515,
"count": 1870,
"self": 36.085335017007196,
"children": {
"SubprocessEnvManager._take_step": {
"total": 13.48667389199943,
"count": 1870,
"self": 0.17863678699814045,
"children": {
"TorchPolicy.evaluate": {
"total": 13.30803710500129,
"count": 1870,
"self": 13.30803710500129
}
}
},
"workers": {
"total": 0.031105117993888598,
"count": 1870,
"self": 0.0,
"children": {
"worker_root": {
"total": 53.453637332001335,
"count": 1870,
"is_parallel": true,
"self": 26.2376790720009,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005686973000024409,
"count": 1,
"is_parallel": true,
"self": 0.004124490000208425,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001562482999815984,
"count": 10,
"is_parallel": true,
"self": 0.001562482999815984
}
}
},
"UnityEnvironment.step": {
"total": 0.05930108500001552,
"count": 1,
"is_parallel": true,
"self": 0.0005017209999778061,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003546930000766224,
"count": 1,
"is_parallel": true,
"self": 0.0003546930000766224
},
"communicator.exchange": {
"total": 0.05678221799996663,
"count": 1,
"is_parallel": true,
"self": 0.05678221799996663
},
"steps_from_proto": {
"total": 0.001662452999994457,
"count": 1,
"is_parallel": true,
"self": 0.0003645320000487118,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012979209999457453,
"count": 10,
"is_parallel": true,
"self": 0.0012979209999457453
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 27.215958260000434,
"count": 1869,
"is_parallel": true,
"self": 1.1160162700007277,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.6211964489993989,
"count": 1869,
"is_parallel": true,
"self": 0.6211964489993989
},
"communicator.exchange": {
"total": 21.754065528001206,
"count": 1869,
"is_parallel": true,
"self": 21.754065528001206
},
"steps_from_proto": {
"total": 3.7246800129991016,
"count": 1869,
"is_parallel": true,
"self": 0.79468652698813,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.9299934860109715,
"count": 18690,
"is_parallel": true,
"self": 2.9299934860109715
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00032925599998634425,
"count": 1,
"self": 0.00032925599998634425,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 49.28462176099515,
"count": 46701,
"is_parallel": true,
"self": 1.1896125180080617,
"children": {
"process_trajectory": {
"total": 28.09185101298715,
"count": 46701,
"is_parallel": true,
"self": 28.09185101298715
},
"_update_policy": {
"total": 20.00315822999994,
"count": 9,
"is_parallel": true,
"self": 7.159385768000448,
"children": {
"TorchPPOOptimizer.update": {
"total": 12.843772461999492,
"count": 456,
"is_parallel": true,
"self": 12.843772461999492
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.259151318000022,
"count": 1,
"self": 0.0011480100000653692,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25800330799995663,
"count": 1,
"self": 0.25800330799995663
}
}
}
}
}
}
}