{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8497298955917358,
"min": 0.8497298955917358,
"max": 2.854966878890991,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8103.8740234375,
"min": 8103.8740234375,
"max": 29237.71484375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.520991325378418,
"min": 0.39857420325279236,
"max": 13.520991325378418,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2636.59326171875,
"min": 77.32339477539062,
"max": 2733.151123046875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.051640553846482355,
"min": 0.04241921850158784,
"max": 0.053829570670980806,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.20656221538592942,
"min": 0.16967687400635137,
"max": 0.26914785335490404,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18384178029373288,
"min": 0.16487218253314495,
"max": 0.3027531748016675,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7353671211749315,
"min": 0.6594887301325798,
"max": 1.5137658740083375,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.885809730600001e-05,
"min": 1.885809730600001e-05,
"max": 0.0006810580027059999,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 7.543238922400004e-05,
"min": 7.543238922400004e-05,
"max": 0.0032320400382800005,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.9617200000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014443060000000012,
"min": 0.00014443060000000012,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000005,
"min": 0.0005777224000000005,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.272727272727273,
"min": 3.8863636363636362,
"max": 26.727272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1156.0,
"min": 171.0,
"max": 1470.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.272727272727273,
"min": 3.8863636363636362,
"max": 26.727272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1156.0,
"min": 171.0,
"max": 1470.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675762017",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675762481"
},
"total": 463.117938063,
"count": 1,
"self": 0.43254443800014997,
"children": {
"run_training.setup": {
"total": 0.10814192199995887,
"count": 1,
"self": 0.10814192199995887
},
"TrainerController.start_learning": {
"total": 462.5772517029999,
"count": 1,
"self": 0.5851602189906089,
"children": {
"TrainerController._reset_env": {
"total": 9.800682908999988,
"count": 1,
"self": 9.800682908999988
},
"TrainerController.advance": {
"total": 452.0324936450095,
"count": 18202,
"self": 0.340239563013256,
"children": {
"env_step": {
"total": 451.6922540819962,
"count": 18202,
"self": 304.53710158600006,
"children": {
"SubprocessEnvManager._take_step": {
"total": 146.85733423099668,
"count": 18202,
"self": 1.5128286989973958,
"children": {
"TorchPolicy.evaluate": {
"total": 145.3445055319993,
"count": 18202,
"self": 33.24556560098506,
"children": {
"TorchPolicy.sample_actions": {
"total": 112.09893993101423,
"count": 18202,
"self": 112.09893993101423
}
}
}
}
},
"workers": {
"total": 0.29781826499947783,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 461.0008334580143,
"count": 18202,
"is_parallel": true,
"self": 210.23632525902144,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00547346199994081,
"count": 1,
"is_parallel": true,
"self": 0.003189231999954245,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022842299999865645,
"count": 10,
"is_parallel": true,
"self": 0.0022842299999865645
}
}
},
"UnityEnvironment.step": {
"total": 0.0903859969999985,
"count": 1,
"is_parallel": true,
"self": 0.0005605559999821708,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003563430000212975,
"count": 1,
"is_parallel": true,
"self": 0.0003563430000212975
},
"communicator.exchange": {
"total": 0.08776066699999774,
"count": 1,
"is_parallel": true,
"self": 0.08776066699999774
},
"steps_from_proto": {
"total": 0.0017084309999972902,
"count": 1,
"is_parallel": true,
"self": 0.0004006379998600096,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013077930001372806,
"count": 10,
"is_parallel": true,
"self": 0.0013077930001372806
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 250.76450819899287,
"count": 18201,
"is_parallel": true,
"self": 9.843452205987091,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.3098874530036255,
"count": 18201,
"is_parallel": true,
"self": 5.3098874530036255
},
"communicator.exchange": {
"total": 203.4100343669944,
"count": 18201,
"is_parallel": true,
"self": 203.4100343669944
},
"steps_from_proto": {
"total": 32.201134173007745,
"count": 18201,
"is_parallel": true,
"self": 7.292056571006924,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.90907760200082,
"count": 182010,
"is_parallel": true,
"self": 24.90907760200082
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00012462899985621334,
"count": 1,
"self": 0.00012462899985621334,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 448.0489555700391,
"count": 474258,
"is_parallel": true,
"self": 11.742948400960586,
"children": {
"process_trajectory": {
"total": 293.2144949620781,
"count": 474258,
"is_parallel": true,
"self": 292.2671304710781,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9473644910000303,
"count": 4,
"is_parallel": true,
"self": 0.9473644910000303
}
}
},
"_update_policy": {
"total": 143.0915122070004,
"count": 90,
"is_parallel": true,
"self": 64.29706306299806,
"children": {
"TorchPPOOptimizer.update": {
"total": 78.79444914400233,
"count": 2160,
"is_parallel": true,
"self": 78.79444914400233
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15879030099995362,
"count": 1,
"self": 0.0018259639998632338,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15696433700009038,
"count": 1,
"self": 0.15696433700009038
}
}
}
}
}
}
}