{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9918809533119202,
"min": 0.9918809533119202,
"max": 2.857837438583374,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9470.4794921875,
"min": 9470.4794921875,
"max": 29267.11328125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.6137888431549072,
"min": 0.09594723582267761,
"max": 2.6137888431549072,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 509.6888122558594,
"min": 18.6137638092041,
"max": 535.4180908203125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07066745451733118,
"min": 0.06357473697154967,
"max": 0.08029124250808986,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28266981806932473,
"min": 0.2601032060552183,
"max": 0.40145621254044933,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.12782912854762638,
"min": 0.07035147774092597,
"max": 0.1652711736513119,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.5113165141905055,
"min": 0.2814059109637039,
"max": 0.8165607580951617,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.568181818181817,
"min": 3.3863636363636362,
"max": 26.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1125.0,
"min": 149.0,
"max": 1403.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.568181818181817,
"min": 3.3863636363636362,
"max": 26.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1125.0,
"min": 149.0,
"max": 1403.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675409275",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675409719"
},
"total": 444.4077789329999,
"count": 1,
"self": 0.4440869569999677,
"children": {
"run_training.setup": {
"total": 0.10949972200000957,
"count": 1,
"self": 0.10949972200000957
},
"TrainerController.start_learning": {
"total": 443.85419225399994,
"count": 1,
"self": 0.5255944369843064,
"children": {
"TrainerController._reset_env": {
"total": 9.515988249999964,
"count": 1,
"self": 9.515988249999964
},
"TrainerController.advance": {
"total": 433.6940230960156,
"count": 18201,
"self": 0.26724765800588557,
"children": {
"env_step": {
"total": 433.4267754380097,
"count": 18201,
"self": 284.6316122170215,
"children": {
"SubprocessEnvManager._take_step": {
"total": 148.52373155998987,
"count": 18201,
"self": 1.4308216479880684,
"children": {
"TorchPolicy.evaluate": {
"total": 147.0929099120018,
"count": 18201,
"self": 32.52677706400891,
"children": {
"TorchPolicy.sample_actions": {
"total": 114.56613284799289,
"count": 18201,
"self": 114.56613284799289
}
}
}
}
},
"workers": {
"total": 0.2714316609983598,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 442.59428941701253,
"count": 18201,
"is_parallel": true,
"self": 213.25332859400123,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006517118000033406,
"count": 1,
"is_parallel": true,
"self": 0.0036149250000789834,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0029021929999544227,
"count": 10,
"is_parallel": true,
"self": 0.0029021929999544227
}
}
},
"UnityEnvironment.step": {
"total": 0.045978705000038644,
"count": 1,
"is_parallel": true,
"self": 0.00042821399995318643,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003762840000263168,
"count": 1,
"is_parallel": true,
"self": 0.0003762840000263168
},
"communicator.exchange": {
"total": 0.043343164000020806,
"count": 1,
"is_parallel": true,
"self": 0.043343164000020806
},
"steps_from_proto": {
"total": 0.0018310430000383349,
"count": 1,
"is_parallel": true,
"self": 0.00040770500004327914,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014233379999950557,
"count": 10,
"is_parallel": true,
"self": 0.0014233379999950557
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 229.3409608230113,
"count": 18200,
"is_parallel": true,
"self": 8.586892338026473,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.257054132001485,
"count": 18200,
"is_parallel": true,
"self": 5.257054132001485
},
"communicator.exchange": {
"total": 181.1053031699995,
"count": 18200,
"is_parallel": true,
"self": 181.1053031699995
},
"steps_from_proto": {
"total": 34.39171118298384,
"count": 18200,
"is_parallel": true,
"self": 6.958180925002239,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.4335302579816,
"count": 182000,
"is_parallel": true,
"self": 27.4335302579816
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.854100006923545e-05,
"count": 1,
"self": 4.854100006923545e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 430.59516504495866,
"count": 352807,
"is_parallel": true,
"self": 9.562011162931071,
"children": {
"process_trajectory": {
"total": 247.46545252802764,
"count": 352807,
"is_parallel": true,
"self": 246.67863218602747,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7868203420001691,
"count": 4,
"is_parallel": true,
"self": 0.7868203420001691
}
}
},
"_update_policy": {
"total": 173.56770135399995,
"count": 90,
"is_parallel": true,
"self": 42.50357376500267,
"children": {
"TorchPPOOptimizer.update": {
"total": 131.06412758899728,
"count": 4587,
"is_parallel": true,
"self": 131.06412758899728
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11853793000000223,
"count": 1,
"self": 0.0007968219999838766,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11774110800001836,
"count": 1,
"self": 0.11774110800001836
}
}
}
}
}
}
}