{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8433369398117065,
"min": 0.8144069910049438,
"max": 1.022692322731018,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8580.953125,
"min": 7875.98095703125,
"max": 10462.142578125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 399968.0,
"min": 209936.0,
"max": 399968.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 399968.0,
"min": 209936.0,
"max": 399968.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 6.582033157348633,
"min": 6.212385654449463,
"max": 6.709054470062256,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1283.4964599609375,
"min": 1192.778076171875,
"max": 1368.6470947265625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.14048790551024964,
"min": 0.13636182126933818,
"max": 0.14898715887346392,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.5619516220409986,
"min": 0.5488027888169895,
"max": 0.7262440745075498,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.16154421320842471,
"min": 0.15663274288903373,
"max": 0.19105696387123317,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6461768528336989,
"min": 0.6265309715561349,
"max": 0.9391194761812907,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.3190986810000008e-06,
"min": 1.3190986810000008e-06,
"max": 4.8619051381e-05,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.276394724000003e-06,
"min": 5.276394724000003e-06,
"max": 0.00023072026928,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10131899999999999,
"min": 0.10131899999999999,
"max": 0.148619,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40527599999999997,
"min": 0.40527599999999997,
"max": 0.73072,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 7.581810000000006e-05,
"min": 7.581810000000006e-05,
"max": 0.0024360881,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00030327240000000024,
"min": 0.00030327240000000024,
"max": 0.011562928000000002,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.75,
"min": 24.636363636363637,
"max": 26.068181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1133.0,
"min": 1084.0,
"max": 1430.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.75,
"min": 24.636363636363637,
"max": 26.068181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1133.0,
"min": 1084.0,
"max": 1430.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1705160624",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1705161283"
},
"total": 659.3538398339997,
"count": 1,
"self": 0.42454440399933446,
"children": {
"run_training.setup": {
"total": 0.05023628100025235,
"count": 1,
"self": 0.05023628100025235
},
"TrainerController.start_learning": {
"total": 658.8790591490001,
"count": 1,
"self": 0.5929154839936928,
"children": {
"TrainerController._reset_env": {
"total": 2.5762412779999977,
"count": 1,
"self": 2.5762412779999977
},
"TrainerController.advance": {
"total": 655.6235476270062,
"count": 18200,
"self": 0.2739549480165806,
"children": {
"env_step": {
"total": 655.3495926789897,
"count": 18200,
"self": 513.9107466449577,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.1498471910195,
"count": 18200,
"self": 1.5597183370273342,
"children": {
"TorchPolicy.evaluate": {
"total": 139.59012885399216,
"count": 18200,
"self": 139.59012885399216
}
}
},
"workers": {
"total": 0.2889988430124504,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 657.497998932999,
"count": 18200,
"is_parallel": true,
"self": 419.6885962810279,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002771499999653315,
"count": 1,
"is_parallel": true,
"self": 0.0006675750000795233,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002103924999573792,
"count": 10,
"is_parallel": true,
"self": 0.002103924999573792
}
}
},
"UnityEnvironment.step": {
"total": 0.07250889300030394,
"count": 1,
"is_parallel": true,
"self": 0.0006575490001523576,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039021600014166324,
"count": 1,
"is_parallel": true,
"self": 0.00039021600014166324
},
"communicator.exchange": {
"total": 0.066217818000041,
"count": 1,
"is_parallel": true,
"self": 0.066217818000041
},
"steps_from_proto": {
"total": 0.005243309999968915,
"count": 1,
"is_parallel": true,
"self": 0.00045627399913428235,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004787036000834632,
"count": 10,
"is_parallel": true,
"self": 0.004787036000834632
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 237.80940265197114,
"count": 18199,
"is_parallel": true,
"self": 11.098548347012184,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.93289383192905,
"count": 18199,
"is_parallel": true,
"self": 5.93289383192905
},
"communicator.exchange": {
"total": 184.83937246500545,
"count": 18199,
"is_parallel": true,
"self": 184.83937246500545
},
"steps_from_proto": {
"total": 35.93858800802445,
"count": 18199,
"is_parallel": true,
"self": 6.688144634965283,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.25044337305917,
"count": 181990,
"is_parallel": true,
"self": 29.25044337305917
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00021364200074458495,
"count": 1,
"self": 0.00021364200074458495,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 649.6792202470401,
"count": 679848,
"is_parallel": true,
"self": 14.798981480899329,
"children": {
"process_trajectory": {
"total": 255.4174487901405,
"count": 679848,
"is_parallel": true,
"self": 254.6711597991407,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7462889909998012,
"count": 4,
"is_parallel": true,
"self": 0.7462889909998012
}
}
},
"_update_policy": {
"total": 379.4627899760003,
"count": 90,
"is_parallel": true,
"self": 67.99034636095621,
"children": {
"TorchPPOOptimizer.update": {
"total": 311.4724436150441,
"count": 24476,
"is_parallel": true,
"self": 311.4724436150441
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08614111799943203,
"count": 1,
"self": 0.0007831609991626465,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08535795700026938,
"count": 1,
"self": 0.08535795700026938
}
}
}
}
}
}
}