{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9628199338912964,
"min": 0.9628199338912964,
"max": 2.861560821533203,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9203.595703125,
"min": 9203.595703125,
"max": 29336.720703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.375421524047852,
"min": 0.32458388805389404,
"max": 12.375421524047852,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2413.207275390625,
"min": 62.96927261352539,
"max": 2474.7431640625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06758645636835033,
"min": 0.06126248645716045,
"max": 0.07374570427052626,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2703458254734013,
"min": 0.2450499458286418,
"max": 0.36872852135263134,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20510374426421726,
"min": 0.12088820846139582,
"max": 0.2755439681019269,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.820414977056869,
"min": 0.4835528338455833,
"max": 1.2980622348795625,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.106097298000005e-06,
"min": 8.106097298000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.242438919200002e-05,
"min": 3.242438919200002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10270200000000002,
"min": 0.10270200000000002,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41080800000000006,
"min": 0.41080800000000006,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014482980000000007,
"min": 0.00014482980000000007,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005793192000000003,
"min": 0.0005793192000000003,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.65909090909091,
"min": 3.5,
"max": 24.65909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1085.0,
"min": 154.0,
"max": 1345.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.65909090909091,
"min": 3.5,
"max": 24.65909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1085.0,
"min": 154.0,
"max": 1345.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1725986967",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1725987431"
},
"total": 463.672254031,
"count": 1,
"self": 0.4257120249999389,
"children": {
"run_training.setup": {
"total": 0.051715738000041256,
"count": 1,
"self": 0.051715738000041256
},
"TrainerController.start_learning": {
"total": 463.19482626800004,
"count": 1,
"self": 0.6059930999979883,
"children": {
"TrainerController._reset_env": {
"total": 2.4039057100000036,
"count": 1,
"self": 2.4039057100000036
},
"TrainerController.advance": {
"total": 460.09476485800207,
"count": 18202,
"self": 0.27696216500373794,
"children": {
"env_step": {
"total": 459.8178026929983,
"count": 18202,
"self": 285.4644331730145,
"children": {
"SubprocessEnvManager._take_step": {
"total": 174.06386217098895,
"count": 18202,
"self": 1.4682396799812523,
"children": {
"TorchPolicy.evaluate": {
"total": 172.5956224910077,
"count": 18202,
"self": 172.5956224910077
}
}
},
"workers": {
"total": 0.28950734899484587,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 461.96124807199743,
"count": 18202,
"is_parallel": true,
"self": 242.06280797999602,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021628779999787184,
"count": 1,
"is_parallel": true,
"self": 0.0006412940001041534,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001521583999874565,
"count": 10,
"is_parallel": true,
"self": 0.001521583999874565
}
}
},
"UnityEnvironment.step": {
"total": 0.034674886999994214,
"count": 1,
"is_parallel": true,
"self": 0.0006496310001011807,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042831999996906234,
"count": 1,
"is_parallel": true,
"self": 0.00042831999996906234
},
"communicator.exchange": {
"total": 0.03163721599997871,
"count": 1,
"is_parallel": true,
"self": 0.03163721599997871
},
"steps_from_proto": {
"total": 0.001959719999945264,
"count": 1,
"is_parallel": true,
"self": 0.00039042299982838813,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001569297000116876,
"count": 10,
"is_parallel": true,
"self": 0.001569297000116876
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 219.8984400920014,
"count": 18201,
"is_parallel": true,
"self": 10.055187758987643,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.181274606006468,
"count": 18201,
"is_parallel": true,
"self": 5.181274606006468
},
"communicator.exchange": {
"total": 172.14785497499923,
"count": 18201,
"is_parallel": true,
"self": 172.14785497499923
},
"steps_from_proto": {
"total": 32.51412275200806,
"count": 18201,
"is_parallel": true,
"self": 6.301519093024126,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.212603658983937,
"count": 182010,
"is_parallel": true,
"self": 26.212603658983937
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001331839999920703,
"count": 1,
"self": 0.0001331839999920703,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 454.2687329410058,
"count": 718054,
"is_parallel": true,
"self": 14.168502214972989,
"children": {
"process_trajectory": {
"total": 253.72478464103273,
"count": 718054,
"is_parallel": true,
"self": 252.88184963303274,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8429350079999836,
"count": 4,
"is_parallel": true,
"self": 0.8429350079999836
}
}
},
"_update_policy": {
"total": 186.37544608500008,
"count": 90,
"is_parallel": true,
"self": 59.47531346699799,
"children": {
"TorchPPOOptimizer.update": {
"total": 126.90013261800209,
"count": 4581,
"is_parallel": true,
"self": 126.90013261800209
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09002941599999303,
"count": 1,
"self": 0.0008912219999501758,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08913819400004286,
"count": 1,
"self": 0.08913819400004286
}
}
}
}
}
}
}