{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8994150161743164,
"min": 0.8685795664787292,
"max": 2.8722782135009766,
"count": 40
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9131.7607421875,
"min": 8274.0888671875,
"max": 29415.0,
"count": 40
},
"SnowballTarget.Step.mean": {
"value": 399992.0,
"min": 9952.0,
"max": 399992.0,
"count": 40
},
"SnowballTarget.Step.sum": {
"value": 399992.0,
"min": 9952.0,
"max": 399992.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.722460746765137,
"min": 0.26966437697410583,
"max": 12.722460746765137,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2480.8798828125,
"min": 52.31488800048828,
"max": 2604.244140625,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.09898542262289638,
"min": 0.0893245171641063,
"max": 0.10430966530884866,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3959416904915855,
"min": 0.3572980686564252,
"max": 0.5215483265442433,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17855720992620083,
"min": 0.11213788322593067,
"max": 0.2533063365548265,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7142288397048033,
"min": 0.4485515329037227,
"max": 1.2665316827741324,
"count": 40
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.5940987029999965e-06,
"min": 2.5940987029999965e-06,
"max": 0.000197294001353,
"count": 40
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.0376394811999986e-05,
"min": 1.0376394811999986e-05,
"max": 0.0009617200191400001,
"count": 40
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10129700000000001,
"min": 0.10129700000000001,
"max": 0.19864699999999996,
"count": 40
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40518800000000005,
"min": 0.40518800000000005,
"max": 0.9808600000000002,
"count": 40
},
"SnowballTarget.Policy.Beta.mean": {
"value": 7.472029999999994e-05,
"min": 7.472029999999994e-05,
"max": 0.0049324853,
"count": 40
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00029888119999999977,
"min": 0.00029888119999999977,
"max": 0.024044913999999997,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.795454545454547,
"min": 3.2954545454545454,
"max": 25.12962962962963,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1091.0,
"min": 145.0,
"max": 1379.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.795454545454547,
"min": 3.2954545454545454,
"max": 25.12962962962963,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1091.0,
"min": 145.0,
"max": 1379.0,
"count": 40
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1709374777",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1709375707"
},
"total": 930.3749521529999,
"count": 1,
"self": 0.8077687090003565,
"children": {
"run_training.setup": {
"total": 0.05089191899969592,
"count": 1,
"self": 0.05089191899969592
},
"TrainerController.start_learning": {
"total": 929.5162915249998,
"count": 1,
"self": 1.1493701499266535,
"children": {
"TrainerController._reset_env": {
"total": 3.0749490230000447,
"count": 1,
"self": 3.0749490230000447
},
"TrainerController.advance": {
"total": 925.1717356430731,
"count": 36399,
"self": 0.5078730520722274,
"children": {
"env_step": {
"total": 924.6638625910009,
"count": 36399,
"self": 645.2641434810125,
"children": {
"SubprocessEnvManager._take_step": {
"total": 278.8481764129092,
"count": 36399,
"self": 2.805793646903112,
"children": {
"TorchPolicy.evaluate": {
"total": 276.0423827660061,
"count": 36399,
"self": 276.0423827660061
}
}
},
"workers": {
"total": 0.551542697079185,
"count": 36399,
"self": 0.0,
"children": {
"worker_root": {
"total": 927.374942489078,
"count": 36399,
"is_parallel": true,
"self": 499.92885281904273,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005414814999767259,
"count": 1,
"is_parallel": true,
"self": 0.003975891999289161,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014389230004780984,
"count": 10,
"is_parallel": true,
"self": 0.0014389230004780984
}
}
},
"UnityEnvironment.step": {
"total": 0.05925037599990901,
"count": 1,
"is_parallel": true,
"self": 0.0006171290001475427,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000381070000003092,
"count": 1,
"is_parallel": true,
"self": 0.000381070000003092
},
"communicator.exchange": {
"total": 0.05633517399974153,
"count": 1,
"is_parallel": true,
"self": 0.05633517399974153
},
"steps_from_proto": {
"total": 0.001917003000016848,
"count": 1,
"is_parallel": true,
"self": 0.000384973999189242,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001532029000827606,
"count": 10,
"is_parallel": true,
"self": 0.001532029000827606
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 427.44608967003524,
"count": 36398,
"is_parallel": true,
"self": 20.59565269400673,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 10.535106480023387,
"count": 36398,
"is_parallel": true,
"self": 10.535106480023387
},
"communicator.exchange": {
"total": 330.9306253589798,
"count": 36398,
"is_parallel": true,
"self": 330.9306253589798
},
"steps_from_proto": {
"total": 65.38470513702532,
"count": 36398,
"is_parallel": true,
"self": 11.962407448971135,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.422297688054186,
"count": 363980,
"is_parallel": true,
"self": 53.422297688054186
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00023636300011276035,
"count": 1,
"self": 0.00023636300011276035,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 914.36439917485,
"count": 1308593,
"is_parallel": true,
"self": 27.35198290780636,
"children": {
"process_trajectory": {
"total": 457.7374168880442,
"count": 1308593,
"is_parallel": true,
"self": 456.86231000704356,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8751068810006473,
"count": 8,
"is_parallel": true,
"self": 0.8751068810006473
}
}
},
"_update_policy": {
"total": 429.2749993789994,
"count": 181,
"is_parallel": true,
"self": 98.25419236201242,
"children": {
"TorchPPOOptimizer.update": {
"total": 331.02080701698696,
"count": 18459,
"is_parallel": true,
"self": 331.02080701698696
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12000034599986975,
"count": 1,
"self": 0.0008530370005246368,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11914730899934511,
"count": 1,
"self": 0.11914730899934511
}
}
}
}
}
}
}
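
Not part of the dump itself, but a minimal sketch for inspecting it offline: the snippet below loads the JSON and prints the gauge statistics plus the top-level timer totals. It assumes the content above has been saved locally as timers.json (ML-Agents typically writes this file under run_logs/ inside the results directory for the run id); adjust the path to wherever your copy lives.

    import json

    # Assumption: the dump above is saved locally as "timers.json".
    with open("timers.json") as f:
        timers = json.load(f)

    # Gauge statistics recorded over the run (value = latest, plus min/max).
    print(f"{'gauge':<55} {'value':>12} {'min':>12} {'max':>12}")
    for name, g in timers["gauges"].items():
        print(f"{name:<55} {g['value']:>12.4f} {g['min']:>12.4f} {g['max']:>12.4f}")

    # Wall-clock totals for the top-level timer nodes.
    for name, node in timers["children"].items():
        print(f"{name}: {node['total']:.1f}s over {node['count']} call(s)")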