{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7811751961708069,
"min": 0.7811751961708069,
"max": 2.835752487182617,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7467.25341796875,
"min": 7467.25341796875,
"max": 29103.328125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.51428508758545,
"min": 0.29961034655570984,
"max": 12.603389739990234,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2440.28564453125,
"min": 58.12440872192383,
"max": 2571.091552734375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.062157941995850466,
"min": 0.062157941995850466,
"max": 0.07363082839988684,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.24863176798340186,
"min": 0.24863176798340186,
"max": 0.3681541419994342,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20659973538097212,
"min": 0.13207718154972456,
"max": 0.30085180970091446,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8263989415238885,
"min": 0.5283087261988982,
"max": 1.414921169479688,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10538800000000001,
"min": 0.10538800000000001,
"max": 0.29458799999999996,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.42155200000000004,
"min": 0.42155200000000004,
"max": 1.42344,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.045454545454547,
"min": 3.590909090909091,
"max": 24.87272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1058.0,
"min": 158.0,
"max": 1368.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.045454545454547,
"min": 3.590909090909091,
"max": 24.87272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1058.0,
"min": 158.0,
"max": 1368.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1718274337",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1718274797"
},
"total": 459.34263225300015,
"count": 1,
"self": 0.4422380920004798,
"children": {
"run_training.setup": {
"total": 0.09591170099997726,
"count": 1,
"self": 0.09591170099997726
},
"TrainerController.start_learning": {
"total": 458.8044824599997,
"count": 1,
"self": 0.5953500039668143,
"children": {
"TrainerController._reset_env": {
"total": 3.3577255580000838,
"count": 1,
"self": 3.3577255580000838
},
"TrainerController.advance": {
"total": 454.7593937470331,
"count": 18202,
"self": 0.2856002640733095,
"children": {
"env_step": {
"total": 454.47379348295976,
"count": 18202,
"self": 295.56156775396016,
"children": {
"SubprocessEnvManager._take_step": {
"total": 158.61704222900926,
"count": 18202,
"self": 1.4987074399793983,
"children": {
"TorchPolicy.evaluate": {
"total": 157.11833478902986,
"count": 18202,
"self": 157.11833478902986
}
}
},
"workers": {
"total": 0.295183499990344,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 457.45184565598765,
"count": 18202,
"is_parallel": true,
"self": 230.68563124797743,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008555518000093798,
"count": 1,
"is_parallel": true,
"self": 0.005310368999971615,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0032451490001221828,
"count": 10,
"is_parallel": true,
"self": 0.0032451490001221828
}
}
},
"UnityEnvironment.step": {
"total": 0.0380051199999798,
"count": 1,
"is_parallel": true,
"self": 0.0007042649997401895,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003945690000364266,
"count": 1,
"is_parallel": true,
"self": 0.0003945690000364266
},
"communicator.exchange": {
"total": 0.03490846500017142,
"count": 1,
"is_parallel": true,
"self": 0.03490846500017142
},
"steps_from_proto": {
"total": 0.001997821000031763,
"count": 1,
"is_parallel": true,
"self": 0.00039841200009504973,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015994089999367134,
"count": 10,
"is_parallel": true,
"self": 0.0015994089999367134
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 226.76621440801023,
"count": 18201,
"is_parallel": true,
"self": 10.280046169932575,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.484428716020375,
"count": 18201,
"is_parallel": true,
"self": 5.484428716020375
},
"communicator.exchange": {
"total": 176.65048915906436,
"count": 18201,
"is_parallel": true,
"self": 176.65048915906436
},
"steps_from_proto": {
"total": 34.351250362992914,
"count": 18201,
"is_parallel": true,
"self": 6.6025927189969025,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.74865764399601,
"count": 182010,
"is_parallel": true,
"self": 27.74865764399601
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00035488000003169873,
"count": 1,
"self": 0.00035488000003169873,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 449.197269614069,
"count": 675780,
"is_parallel": true,
"self": 14.413778418348102,
"children": {
"process_trajectory": {
"total": 249.18600906472102,
"count": 675780,
"is_parallel": true,
"self": 248.47579084672043,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7102182180005912,
"count": 4,
"is_parallel": true,
"self": 0.7102182180005912
}
}
},
"_update_policy": {
"total": 185.59748213099988,
"count": 90,
"is_parallel": true,
"self": 59.884959256014554,
"children": {
"TorchPPOOptimizer.update": {
"total": 125.71252287498532,
"count": 4584,
"is_parallel": true,
"self": 125.71252287498532
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09165827099968737,
"count": 1,
"self": 0.0009059379995051131,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09075233300018226,
"count": 1,
"self": 0.09075233300018226
}
}
}
}
}
}