{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.895058810710907,
"min": 0.895058810710907,
"max": 2.867727518081665,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8546.021484375,
"min": 8546.021484375,
"max": 29431.48828125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.778275489807129,
"min": 0.3487585783004761,
"max": 12.778275489807129,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2491.763671875,
"min": 67.65916442871094,
"max": 2569.97412109375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06490606245443262,
"min": 0.06355480209858083,
"max": 0.07611543758185671,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25962424981773047,
"min": 0.2542192083943233,
"max": 0.3466881714491904,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20160604527621878,
"min": 0.13051440546006038,
"max": 0.27540178930058196,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8064241811048751,
"min": 0.5220576218402415,
"max": 1.3770089465029098,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.545454545454547,
"min": 3.590909090909091,
"max": 25.545454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1124.0,
"min": 158.0,
"max": 1391.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.545454545454547,
"min": 3.590909090909091,
"max": 25.545454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1124.0,
"min": 158.0,
"max": 1391.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710417191",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1710417642"
},
"total": 450.84737664,
"count": 1,
"self": 0.4904453559998956,
"children": {
"run_training.setup": {
"total": 0.05772886100010055,
"count": 1,
"self": 0.05772886100010055
},
"TrainerController.start_learning": {
"total": 450.299202423,
"count": 1,
"self": 0.5592257149967281,
"children": {
"TrainerController._reset_env": {
"total": 3.2387265180000213,
"count": 1,
"self": 3.2387265180000213
},
"TrainerController.advance": {
"total": 446.40702824400296,
"count": 18199,
"self": 0.2736396099958256,
"children": {
"env_step": {
"total": 446.13338863400713,
"count": 18199,
"self": 289.6148570499968,
"children": {
"SubprocessEnvManager._take_step": {
"total": 156.23185259699483,
"count": 18199,
"self": 1.4672364979887789,
"children": {
"TorchPolicy.evaluate": {
"total": 154.76461609900605,
"count": 18199,
"self": 154.76461609900605
}
}
},
"workers": {
"total": 0.2866789870155344,
"count": 18199,
"self": 0.0,
"children": {
"worker_root": {
"total": 449.10146330798773,
"count": 18199,
"is_parallel": true,
"self": 223.29518819798966,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006115463000014643,
"count": 1,
"is_parallel": true,
"self": 0.0037738489999128433,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023416140001017993,
"count": 10,
"is_parallel": true,
"self": 0.0023416140001017993
}
}
},
"UnityEnvironment.step": {
"total": 0.037087937000023885,
"count": 1,
"is_parallel": true,
"self": 0.0007344009999314949,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000413129000094159,
"count": 1,
"is_parallel": true,
"self": 0.000413129000094159
},
"communicator.exchange": {
"total": 0.03397134099998311,
"count": 1,
"is_parallel": true,
"self": 0.03397134099998311
},
"steps_from_proto": {
"total": 0.0019690660000151183,
"count": 1,
"is_parallel": true,
"self": 0.00036432800015973044,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001604737999855388,
"count": 10,
"is_parallel": true,
"self": 0.001604737999855388
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 225.80627510999807,
"count": 18198,
"is_parallel": true,
"self": 10.439828306015215,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.343305246989644,
"count": 18198,
"is_parallel": true,
"self": 5.343305246989644
},
"communicator.exchange": {
"total": 176.0169577109948,
"count": 18198,
"is_parallel": true,
"self": 176.0169577109948
},
"steps_from_proto": {
"total": 34.00618384599841,
"count": 18198,
"is_parallel": true,
"self": 6.415854606012772,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.590329239985635,
"count": 181980,
"is_parallel": true,
"self": 27.590329239985635
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00014973200018175703,
"count": 1,
"self": 0.00014973200018175703,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 440.7865675549224,
"count": 683206,
"is_parallel": true,
"self": 14.823541655873555,
"children": {
"process_trajectory": {
"total": 242.23339714204894,
"count": 683206,
"is_parallel": true,
"self": 241.64645661004897,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5869405319999714,
"count": 4,
"is_parallel": true,
"self": 0.5869405319999714
}
}
},
"_update_policy": {
"total": 183.7296287569999,
"count": 90,
"is_parallel": true,
"self": 52.39633800999263,
"children": {
"TorchPPOOptimizer.update": {
"total": 131.33329074700725,
"count": 4587,
"is_parallel": true,
"self": 131.33329074700725
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09407221400010712,
"count": 1,
"self": 0.0009550180000132968,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09311719600009383,
"count": 1,
"self": 0.09311719600009383
}
}
}
}
}
}
}