{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8370972275733948,
"min": 0.8370972275733948,
"max": 2.8588879108428955,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7992.6044921875,
"min": 7992.6044921875,
"max": 29309.318359375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.7811861038208,
"min": 0.2791458070278168,
"max": 12.972125053405762,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2492.331298828125,
"min": 54.15428924560547,
"max": 2645.44580078125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06738024796519027,
"min": 0.06291451561107761,
"max": 0.07693827273871968,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26952099186076106,
"min": 0.25165806244431044,
"max": 0.3846913636935984,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20532643261785602,
"min": 0.12644542183147234,
"max": 0.2712783714719847,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8213057304714241,
"min": 0.5057816873258894,
"max": 1.3563918573599234,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.113636363636363,
"min": 3.477272727272727,
"max": 25.636363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1105.0,
"min": 153.0,
"max": 1410.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.113636363636363,
"min": 3.477272727272727,
"max": 25.636363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1105.0,
"min": 153.0,
"max": 1410.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1722423914",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1722424390"
},
"total": 475.9005632330002,
"count": 1,
"self": 0.43157541400023547,
"children": {
"run_training.setup": {
"total": 0.07085516400002234,
"count": 1,
"self": 0.07085516400002234
},
"TrainerController.start_learning": {
"total": 475.3981326549999,
"count": 1,
"self": 0.6526122389834654,
"children": {
"TrainerController._reset_env": {
"total": 2.798851610999918,
"count": 1,
"self": 2.798851610999918
},
"TrainerController.advance": {
"total": 471.83864539701653,
"count": 18201,
"self": 0.3056806940360275,
"children": {
"env_step": {
"total": 471.5329647029805,
"count": 18201,
"self": 307.6519660479971,
"children": {
"SubprocessEnvManager._take_step": {
"total": 163.5759928999928,
"count": 18201,
"self": 1.5889170349994401,
"children": {
"TorchPolicy.evaluate": {
"total": 161.98707586499336,
"count": 18201,
"self": 161.98707586499336
}
}
},
"workers": {
"total": 0.3050057549905887,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 473.9386841219996,
"count": 18201,
"is_parallel": true,
"self": 240.2492969940074,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005626583000093888,
"count": 1,
"is_parallel": true,
"self": 0.003965077000202655,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001661505999891233,
"count": 10,
"is_parallel": true,
"self": 0.001661505999891233
}
}
},
"UnityEnvironment.step": {
"total": 0.03818623500001195,
"count": 1,
"is_parallel": true,
"self": 0.0007201499997790961,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003840470001250651,
"count": 1,
"is_parallel": true,
"self": 0.0003840470001250651
},
"communicator.exchange": {
"total": 0.03497716600008971,
"count": 1,
"is_parallel": true,
"self": 0.03497716600008971
},
"steps_from_proto": {
"total": 0.0021048720000180765,
"count": 1,
"is_parallel": true,
"self": 0.00040893599975788675,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016959360002601898,
"count": 10,
"is_parallel": true,
"self": 0.0016959360002601898
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 233.68938712799218,
"count": 18200,
"is_parallel": true,
"self": 10.48078469994607,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.682723615992472,
"count": 18200,
"is_parallel": true,
"self": 5.682723615992472
},
"communicator.exchange": {
"total": 182.06767561701872,
"count": 18200,
"is_parallel": true,
"self": 182.06767561701872
},
"steps_from_proto": {
"total": 35.45820319503491,
"count": 18200,
"is_parallel": true,
"self": 6.866202692986008,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.592000502048904,
"count": 182000,
"is_parallel": true,
"self": 28.592000502048904
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00016744699996706913,
"count": 1,
"self": 0.00016744699996706913,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 465.90514656193136,
"count": 683845,
"is_parallel": true,
"self": 14.992023835956616,
"children": {
"process_trajectory": {
"total": 259.1903054119739,
"count": 683845,
"is_parallel": true,
"self": 258.483158951974,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7071464599998762,
"count": 4,
"is_parallel": true,
"self": 0.7071464599998762
}
}
},
"_update_policy": {
"total": 191.72281731400085,
"count": 90,
"is_parallel": true,
"self": 61.20790993500623,
"children": {
"TorchPPOOptimizer.update": {
"total": 130.51490737899462,
"count": 4587,
"is_parallel": true,
"self": 130.51490737899462
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.10785596100004113,
"count": 1,
"self": 0.0009831899999426241,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10687277100009851,
"count": 1,
"self": 0.10687277100009851
}
}
}
}
}
}
}