{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9515069127082825,
"min": 0.9515069127082825,
"max": 2.8752284049987793,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9105.9208984375,
"min": 9105.9208984375,
"max": 29445.21484375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.049844741821289,
"min": 0.3343978822231293,
"max": 13.10097885131836,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2544.7197265625,
"min": 64.8731918334961,
"max": 2672.599609375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0707615668130779,
"min": 0.061730924758411874,
"max": 0.07360198857635375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2830462672523116,
"min": 0.25501695315139006,
"max": 0.3663747794532409,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20315584934809627,
"min": 0.11235023986014045,
"max": 0.2973370026139652,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8126233973923851,
"min": 0.4494009594405618,
"max": 1.486685013069826,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.886363636363637,
"min": 3.7045454545454546,
"max": 25.90909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1139.0,
"min": 163.0,
"max": 1425.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.886363636363637,
"min": 3.7045454545454546,
"max": 25.90909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1139.0,
"min": 163.0,
"max": 1425.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1712066442",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1712066940"
},
"total": 497.383113117,
"count": 1,
"self": 0.907681003000107,
"children": {
"run_training.setup": {
"total": 0.06111610799985101,
"count": 1,
"self": 0.06111610799985101
},
"TrainerController.start_learning": {
"total": 496.41431600600004,
"count": 1,
"self": 0.6975254459955522,
"children": {
"TrainerController._reset_env": {
"total": 3.499115483999958,
"count": 1,
"self": 3.499115483999958
},
"TrainerController.advance": {
"total": 492.0722646610045,
"count": 18200,
"self": 0.33573380000507314,
"children": {
"env_step": {
"total": 491.73653086099944,
"count": 18200,
"self": 320.380124301,
"children": {
"SubprocessEnvManager._take_step": {
"total": 171.00779579498612,
"count": 18200,
"self": 1.662468067005193,
"children": {
"TorchPolicy.evaluate": {
"total": 169.34532772798093,
"count": 18200,
"self": 169.34532772798093
}
}
},
"workers": {
"total": 0.3486107650132908,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 494.9922847289861,
"count": 18200,
"is_parallel": true,
"self": 242.30437266097852,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005895876000067801,
"count": 1,
"is_parallel": true,
"self": 0.004269425000074989,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016264509999928123,
"count": 10,
"is_parallel": true,
"self": 0.0016264509999928123
}
}
},
"UnityEnvironment.step": {
"total": 0.038170231000094645,
"count": 1,
"is_parallel": true,
"self": 0.0007521520001319004,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004670250000344822,
"count": 1,
"is_parallel": true,
"self": 0.0004670250000344822
},
"communicator.exchange": {
"total": 0.03481749699994907,
"count": 1,
"is_parallel": true,
"self": 0.03481749699994907
},
"steps_from_proto": {
"total": 0.0021335569999791915,
"count": 1,
"is_parallel": true,
"self": 0.0004583089996685885,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001675248000310603,
"count": 10,
"is_parallel": true,
"self": 0.001675248000310603
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 252.68791206800756,
"count": 18199,
"is_parallel": true,
"self": 11.788031042005969,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.045052693013076,
"count": 18199,
"is_parallel": true,
"self": 6.045052693013076
},
"communicator.exchange": {
"total": 197.79003348199103,
"count": 18199,
"is_parallel": true,
"self": 197.79003348199103
},
"steps_from_proto": {
"total": 37.06479485099749,
"count": 18199,
"is_parallel": true,
"self": 7.309634776077928,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.75516007491956,
"count": 181990,
"is_parallel": true,
"self": 29.75516007491956
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.378700009510794e-05,
"count": 1,
"self": 4.378700009510794e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 486.100251051932,
"count": 746970,
"is_parallel": true,
"self": 15.944098171974701,
"children": {
"process_trajectory": {
"total": 269.3317503419589,
"count": 746970,
"is_parallel": true,
"self": 268.37732756995865,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9544227720002709,
"count": 4,
"is_parallel": true,
"self": 0.9544227720002709
}
}
},
"_update_policy": {
"total": 200.82440253799837,
"count": 90,
"is_parallel": true,
"self": 56.14095809600485,
"children": {
"TorchPPOOptimizer.update": {
"total": 144.68344444199352,
"count": 4587,
"is_parallel": true,
"self": 144.68344444199352
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14536662799991973,
"count": 1,
"self": 0.0013211890000093263,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1440454389999104,
"count": 1,
"self": 0.1440454389999104
}
}
}
}
}
}
}