{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.005699634552002,
"min": 1.0049445629119873,
"max": 2.8581583499908447,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9602.419921875,
"min": 9602.419921875,
"max": 29238.9609375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.138331413269043,
"min": 0.439153254032135,
"max": 13.138331413269043,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2561.974609375,
"min": 85.19573211669922,
"max": 2667.48974609375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06682304018677848,
"min": 0.06318777558472528,
"max": 0.07394380272670269,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2672921607471139,
"min": 0.25275110233890113,
"max": 0.36971901363351345,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21168423473250633,
"min": 0.15045997612954426,
"max": 0.27696398531689365,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8467369389300253,
"min": 0.601839904518177,
"max": 1.3848199265844683,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.931818181818183,
"min": 3.8636363636363638,
"max": 26.054545454545455,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1141.0,
"min": 170.0,
"max": 1433.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.931818181818183,
"min": 3.8636363636363638,
"max": 26.054545454545455,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1141.0,
"min": 170.0,
"max": 1433.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674119665",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674120112"
},
"total": 446.43869328599993,
"count": 1,
"self": 0.3915354809999485,
"children": {
"run_training.setup": {
"total": 0.11942846099998405,
"count": 1,
"self": 0.11942846099998405
},
"TrainerController.start_learning": {
"total": 445.927729344,
"count": 1,
"self": 0.6151862049898114,
"children": {
"TrainerController._reset_env": {
"total": 9.883327596000015,
"count": 1,
"self": 9.883327596000015
},
"TrainerController.advance": {
"total": 435.30245007401004,
"count": 18202,
"self": 0.3169543530151486,
"children": {
"env_step": {
"total": 434.9854957209949,
"count": 18202,
"self": 287.43907266098716,
"children": {
"SubprocessEnvManager._take_step": {
"total": 147.24471781000165,
"count": 18202,
"self": 1.4795187950082322,
"children": {
"TorchPolicy.evaluate": {
"total": 145.7651990149934,
"count": 18202,
"self": 33.93978307899471,
"children": {
"TorchPolicy.sample_actions": {
"total": 111.8254159359987,
"count": 18202,
"self": 111.8254159359987
}
}
}
}
},
"workers": {
"total": 0.30170525000607995,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 444.54697358399466,
"count": 18202,
"is_parallel": true,
"self": 209.14988695199213,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006653902999971706,
"count": 1,
"is_parallel": true,
"self": 0.003930167000021356,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0027237359999503497,
"count": 10,
"is_parallel": true,
"self": 0.0027237359999503497
}
}
},
"UnityEnvironment.step": {
"total": 0.042472591999967335,
"count": 1,
"is_parallel": true,
"self": 0.00033843799997157475,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003050390000112202,
"count": 1,
"is_parallel": true,
"self": 0.0003050390000112202
},
"communicator.exchange": {
"total": 0.04001076500003364,
"count": 1,
"is_parallel": true,
"self": 0.04001076500003364
},
"steps_from_proto": {
"total": 0.0018183499999508967,
"count": 1,
"is_parallel": true,
"self": 0.00043609199997263204,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013822579999782647,
"count": 10,
"is_parallel": true,
"self": 0.0013822579999782647
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 235.39708663200253,
"count": 18201,
"is_parallel": true,
"self": 8.628276847007385,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.10889659299437,
"count": 18201,
"is_parallel": true,
"self": 5.10889659299437
},
"communicator.exchange": {
"total": 188.46792588100755,
"count": 18201,
"is_parallel": true,
"self": 188.46792588100755
},
"steps_from_proto": {
"total": 33.19198731099323,
"count": 18201,
"is_parallel": true,
"self": 6.984772530990767,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.20721478000246,
"count": 182010,
"is_parallel": true,
"self": 26.20721478000246
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.497300003298733e-05,
"count": 1,
"self": 5.497300003298733e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 432.4641151959673,
"count": 358774,
"is_parallel": true,
"self": 9.394719670966765,
"children": {
"process_trajectory": {
"total": 250.37600252299995,
"count": 358774,
"is_parallel": true,
"self": 249.65628114800006,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7197213749998923,
"count": 4,
"is_parallel": true,
"self": 0.7197213749998923
}
}
},
"_update_policy": {
"total": 172.69339300200056,
"count": 90,
"is_parallel": true,
"self": 42.28814319399606,
"children": {
"TorchPPOOptimizer.update": {
"total": 130.4052498080045,
"count": 4587,
"is_parallel": true,
"self": 130.4052498080045
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12671049600010065,
"count": 1,
"self": 0.001004685000111749,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1257058109999889,
"count": 1,
"self": 0.1257058109999889
}
}
}
}
}
}
}