{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.2851881980895996,
"min": 2.2851881980895996,
"max": 2.863436222076416,
"count": 5
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 23327.201171875,
"min": 23327.201171875,
"max": 29229.95703125,
"count": 5
},
"SnowballTarget.Step.mean": {
"value": 49936.0,
"min": 9952.0,
"max": 49936.0,
"count": 5
},
"SnowballTarget.Step.sum": {
"value": 49936.0,
"min": 9952.0,
"max": 49936.0,
"count": 5
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 4.90823221206665,
"min": 0.5719503164291382,
"max": 4.90823221206665,
"count": 5
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 952.197021484375,
"min": 110.95835876464844,
"max": 952.197021484375,
"count": 5
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0659994036478057,
"min": 0.06516088840336351,
"max": 0.0723172462998442,
"count": 5
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2639976145912228,
"min": 0.2639976145912228,
"max": 0.36158623149922103,
"count": 5
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2905423788755548,
"min": 0.141575666758524,
"max": 0.2905423788755548,
"count": 5
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.1621695155022191,
"min": 0.566302667034096,
"max": 1.3247911702184116,
"count": 5
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.9928090024000003e-05,
"min": 2.9928090024000003e-05,
"max": 0.00026752801082399996,
"count": 5
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.00011971236009600001,
"min": 0.00011971236009600001,
"max": 0.0010701120432959998,
"count": 5
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10997600000000002,
"min": 0.10997600000000002,
"max": 0.18917599999999998,
"count": 5
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4399040000000001,
"min": 0.4399040000000001,
"max": 0.8468800000000001,
"count": 5
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0005078024,
"min": 0.0005078024,
"max": 0.0044598824,
"count": 5
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0020312096,
"min": 0.0020312096,
"max": 0.0178395296,
"count": 5
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 5
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 5
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 11.886363636363637,
"min": 3.8636363636363638,
"max": 11.886363636363637,
"count": 5
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 523.0,
"min": 170.0,
"max": 606.0,
"count": 5
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 11.886363636363637,
"min": 3.8636363636363638,
"max": 11.886363636363637,
"count": 5
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 523.0,
"min": 170.0,
"max": 606.0,
"count": 5
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1729485283",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1729485393"
},
"total": 109.49620752099997,
"count": 1,
"self": 0.4245238790000485,
"children": {
"run_training.setup": {
"total": 0.052073823999990054,
"count": 1,
"self": 0.052073823999990054
},
"TrainerController.start_learning": {
"total": 109.01960981799994,
"count": 1,
"self": 0.08957225399956315,
"children": {
"TrainerController._reset_env": {
"total": 1.6209761740000204,
"count": 1,
"self": 1.6209761740000204
},
"TrainerController.advance": {
"total": 107.21576100400034,
"count": 4592,
"self": 0.09169077099613787,
"children": {
"env_step": {
"total": 75.06641918300375,
"count": 4592,
"self": 56.36825290100296,
"children": {
"SubprocessEnvManager._take_step": {
"total": 18.64736323700083,
"count": 4592,
"self": 0.31530930900089515,
"children": {
"TorchPolicy.evaluate": {
"total": 18.332053927999937,
"count": 4592,
"self": 18.332053927999937
}
}
},
"workers": {
"total": 0.05080304499995236,
"count": 4592,
"self": 0.0,
"children": {
"worker_root": {
"total": 108.49475175799853,
"count": 4592,
"is_parallel": true,
"self": 59.136524051000265,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021947339999996984,
"count": 1,
"is_parallel": true,
"self": 0.0006614869998884387,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015332470001112597,
"count": 10,
"is_parallel": true,
"self": 0.0015332470001112597
}
}
},
"UnityEnvironment.step": {
"total": 0.034506729999975505,
"count": 1,
"is_parallel": true,
"self": 0.0006338469997899665,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038313300001391326,
"count": 1,
"is_parallel": true,
"self": 0.00038313300001391326
},
"communicator.exchange": {
"total": 0.03169043600007626,
"count": 1,
"is_parallel": true,
"self": 0.03169043600007626
},
"steps_from_proto": {
"total": 0.0017993140000953645,
"count": 1,
"is_parallel": true,
"self": 0.0003441309999061559,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014551830001892085,
"count": 10,
"is_parallel": true,
"self": 0.0014551830001892085
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 49.358227706998264,
"count": 4591,
"is_parallel": true,
"self": 2.526485178005146,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.3401401509939888,
"count": 4591,
"is_parallel": true,
"self": 1.3401401509939888
},
"communicator.exchange": {
"total": 37.652184655997644,
"count": 4591,
"is_parallel": true,
"self": 37.652184655997644
},
"steps_from_proto": {
"total": 7.839417722001485,
"count": 4591,
"is_parallel": true,
"self": 1.4192604110000957,
"children": {
"_process_rank_one_or_two_observation": {
"total": 6.42015731100139,
"count": 45910,
"is_parallel": true,
"self": 6.42015731100139
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 32.05765105000046,
"count": 4592,
"self": 0.10775439800192999,
"children": {
"process_trajectory": {
"total": 6.665141545998722,
"count": 4592,
"self": 6.5432424249986525,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1218991210000695,
"count": 1,
"self": 0.1218991210000695
}
}
},
"_update_policy": {
"total": 25.284755105999807,
"count": 22,
"self": 10.298084988002984,
"children": {
"TorchPPOOptimizer.update": {
"total": 14.986670117996823,
"count": 1119,
"self": 14.986670117996823
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0239999710393022e-06,
"count": 1,
"self": 1.0239999710393022e-06
},
"TrainerController._save_models": {
"total": 0.09329936200003885,
"count": 1,
"self": 0.0011963020000393954,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09210305999999946,
"count": 1,
"self": 0.09210305999999946
}
}
}
}
}
}
}
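For reference, here is a minimal Python sketch for inspecting a dump like the one above. It assumes the file is saved locally as run_logs/timers.json (the path and the walk helper are illustrative assumptions, not part of the original ML-Agents output); it prints each gauge's value/min/max/count and then descends the hierarchical timer tree to show where wall-clock time went during the SnowballTarget run.

import json

# Load the ML-Agents timer/gauge dump (path is an assumption for this sketch).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the last value plus min/max/count for one training stat.
for name, gauge in timers["gauges"].items():
    print(f"{name}: last={gauge['value']:.4f} "
          f"min={gauge['min']:.4f} max={gauge['max']:.4f} n={gauge['count']}")

# Recursively walk the timer tree: every node carries total seconds, a call
# count, and an optional "children" dict of nested timers.
def walk(node, label="root", depth=0):
    print(f"{'  ' * depth}{label}: {node.get('total', 0.0):.2f}s "
          f"over {node.get('count', 0)} calls")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)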