{
    "name": "root",
    "gauges": {
        "SnowballTarget.Policy.Entropy.mean": {
            "value": 0.5291388034820557,
            "min": 0.5173834562301636,
            "max": 2.8587822914123535,
            "count": 200
        },
        "SnowballTarget.Policy.Entropy.sum": {
            "value": 5413.08984375,
            "min": 4999.05712890625,
            "max": 29245.34375,
            "count": 200
        },
        "SnowballTarget.Step.mean": {
            "value": 1999992.0,
            "min": 9952.0,
            "max": 1999992.0,
            "count": 200
        },
        "SnowballTarget.Step.sum": {
            "value": 1999992.0,
            "min": 9952.0,
            "max": 1999992.0,
            "count": 200
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
            "value": 14.239165306091309,
            "min": 0.3915049433708191,
            "max": 14.386966705322266,
            "count": 200
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
            "value": 2919.02880859375,
            "min": 75.95195770263672,
            "max": 2949.328125,
            "count": 200
        },
        "SnowballTarget.Environment.EpisodeLength.mean": {
            "value": 199.0,
            "min": 199.0,
            "max": 199.0,
            "count": 200
        },
        "SnowballTarget.Environment.EpisodeLength.sum": {
            "value": 10945.0,
            "min": 8756.0,
            "max": 10945.0,
            "count": 200
        },
        "SnowballTarget.Losses.PolicyLoss.mean": {
            "value": 0.06509208678925302,
            "min": 0.05793846801337649,
            "max": 0.07891193099431784,
            "count": 200
        },
        "SnowballTarget.Losses.PolicyLoss.sum": {
            "value": 0.32546043394626506,
            "min": 0.23175387205350595,
            "max": 0.3766518468328286,
            "count": 200
        },
        "SnowballTarget.Losses.ValueLoss.mean": {
            "value": 0.15311466061017093,
            "min": 0.10854222133125671,
            "max": 0.2772454929848512,
            "count": 200
        },
        "SnowballTarget.Losses.ValueLoss.sum": {
            "value": 0.7655733030508546,
            "min": 0.43416888532502684,
            "max": 1.3351598829030993,
            "count": 200
        },
        "SnowballTarget.Policy.LearningRate.mean": {
            "value": 7.032997656000039e-07,
            "min": 7.032997656000039e-07,
            "max": 0.00029918820027059994,
            "count": 200
        },
        "SnowballTarget.Policy.LearningRate.sum": {
            "value": 3.5164988280000196e-06,
            "min": 3.5164988280000196e-06,
            "max": 0.0014885160038279998,
            "count": 200
        },
        "SnowballTarget.Policy.Epsilon.mean": {
            "value": 0.10023440000000002,
            "min": 0.10023440000000002,
            "max": 0.1997294,
            "count": 200
        },
        "SnowballTarget.Policy.Epsilon.sum": {
            "value": 0.5011720000000001,
            "min": 0.4029176,
            "max": 0.996172,
            "count": 200
        },
        "SnowballTarget.Policy.Beta.mean": {
            "value": 2.1696560000000067e-05,
            "min": 2.1696560000000067e-05,
            "max": 0.004986497059999999,
            "count": 200
        },
        "SnowballTarget.Policy.Beta.sum": {
            "value": 0.00010848280000000034,
            "min": 0.00010848280000000034,
            "max": 0.024808982800000004,
            "count": 200
        },
        "SnowballTarget.Environment.CumulativeReward.mean": {
            "value": 28.163636363636364,
            "min": 3.3863636363636362,
            "max": 28.477272727272727,
            "count": 200
        },
        "SnowballTarget.Environment.CumulativeReward.sum": {
            "value": 1549.0,
            "min": 149.0,
            "max": 1561.0,
            "count": 200
        },
        "SnowballTarget.Policy.ExtrinsicReward.mean": {
            "value": 28.163636363636364,
            "min": 3.3863636363636362,
            "max": 28.477272727272727,
            "count": 200
        },
        "SnowballTarget.Policy.ExtrinsicReward.sum": {
            "value": 1549.0,
            "min": 149.0,
            "max": 1561.0,
            "count": 200
        },
        "SnowballTarget.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 200
        },
        "SnowballTarget.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 200
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1720130833",
        "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
        "mlagents_version": "1.1.0.dev0",
        "mlagents_envs_version": "1.1.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "2.3.0+cu121",
        "numpy_version": "1.23.5",
        "end_time_seconds": "1720135482"
    },
    "total": 4649.509552075001,
    "count": 1,
    "self": 0.48859560799974133,
    "children": {
        "run_training.setup": {
            "total": 0.05359070500071539,
            "count": 1,
            "self": 0.05359070500071539
        },
        "TrainerController.start_learning": {
            "total": 4648.967365762001,
            "count": 1,
            "self": 5.910175899049136,
            "children": {
                "TrainerController._reset_env": {
                    "total": 2.937081534999379,
                    "count": 1,
                    "self": 2.937081534999379
                },
                "TrainerController.advance": {
                    "total": 4640.029829719953,
                    "count": 181872,
                    "self": 2.9316713241478283,
                    "children": {
                        "env_step": {
                            "total": 4637.0981583958055,
                            "count": 181872,
                            "self": 3022.50998150295,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 1611.6955746918266,
                                    "count": 181872,
                                    "self": 15.139883063241541,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 1596.555691628585,
                                            "count": 181872,
                                            "self": 1596.555691628585
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 2.892602201029149,
                                    "count": 181872,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 4636.800737390936,
                                            "count": 181872,
                                            "is_parallel": true,
                                            "self": 2318.011387791349,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.0030143000003590714,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0007683900012125378,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.0022459099991465337,
                                                                    "count": 10,
                                                                    "is_parallel": true,
                                                                    "self": 0.0022459099991465337
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.07776201500018942,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0007039259999146452,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.000420372999542451,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.000420372999542451
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.07455213899993396,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.07455213899993396
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0020855770007983665,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0004085379996467964,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0016770390011515701,
                                                                            "count": 10,
                                                                            "is_parallel": true,
                                                                            "self": 0.0016770390011515701
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 2318.789349599587,
                                                    "count": 181871,
                                                    "is_parallel": true,
                                                    "self": 103.76306407735319,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 55.42352665841645,
                                                            "count": 181871,
                                                            "is_parallel": true,
                                                            "self": 55.42352665841645
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 1806.3840215860182,
                                                            "count": 181871,
                                                            "is_parallel": true,
                                                            "self": 1806.3840215860182
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 353.21873727779894,
                                                            "count": 181871,
                                                            "is_parallel": true,
                                                            "self": 66.42432226970959,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 286.79441500808935,
                                                                    "count": 1818710,
                                                                    "is_parallel": true,
                                                                    "self": 286.79441500808935
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 0.00028783099878637586,
                    "count": 1,
                    "self": 0.00028783099878637586,
                    "children": {
                        "thread_root": {
                            "total": 0.0,
                            "count": 0,
                            "is_parallel": true,
                            "self": 0.0,
                            "children": {
                                "trainer_advance": {
                                    "total": 4582.17035785442,
                                    "count": 6804312,
                                    "is_parallel": true,
                                    "self": 154.48491806491893,
                                    "children": {
                                        "process_trajectory": {
                                            "total": 2526.759622636476,
                                            "count": 6804312,
                                            "is_parallel": true,
                                            "self": 2518.2699130204737,
                                            "children": {
                                                "RLTrainer._checkpoint": {
                                                    "total": 8.489709616002074,
                                                    "count": 40,
                                                    "is_parallel": true,
                                                    "self": 8.489709616002074
                                                }
                                            }
                                        },
                                        "_update_policy": {
                                            "total": 1900.9258171530255,
                                            "count": 909,
                                            "is_parallel": true,
                                            "self": 596.7985820900067,
                                            "children": {
                                                "TorchPPOOptimizer.update": {
                                                    "total": 1304.1272350630188,
                                                    "count": 46341,
                                                    "is_parallel": true,
                                                    "self": 1304.1272350630188
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "TrainerController._save_models": {
                    "total": 0.08999077699991176,
                    "count": 1,
                    "self": 0.0013316320000740234,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.08865914499983774,
                            "count": 1,
                            "self": 0.08865914499983774
                        }
                    }
                }
            }
        }
    }
}