{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.47822296619415283,
"min": 0.38711053133010864,
"max": 1.4194719791412354,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14316.0830078125,
"min": 11557.572265625,
"max": 43061.1015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989883.0,
"min": 29952.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989883.0,
"min": 29952.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.21990154683589935,
"min": -0.17022055387496948,
"max": 0.2633354067802429,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 55.63508987426758,
"min": -40.34226989746094,
"max": 68.46720886230469,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.8864403367042542,
"min": -0.8864403367042542,
"max": 0.35417696833610535,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -224.2694091796875,
"min": -224.2694091796875,
"max": 85.71082305908203,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07116634851194074,
"min": 0.06472399017989769,
"max": 0.07282879728225847,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9963288791671705,
"min": 0.5023583531268971,
"max": 1.092431959233877,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.1822836324156794,
"min": 0.0006305802573615309,
"max": 0.1822836324156794,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 2.551970853819512,
"min": 0.008828123603061433,
"max": 2.551970853819512,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.37647611263571e-06,
"min": 7.37647611263571e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010327066557689994,
"min": 0.00010327066557689994,
"max": 0.0036091341969553,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245879285714285,
"min": 0.10245879285714285,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4344230999999998,
"min": 1.3886848,
"max": 2.5696692000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002556334064285713,
"min": 0.0002556334064285713,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035788676899999984,
"min": 0.0035788676899999984,
"max": 0.12031416553000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01144364383071661,
"min": 0.010052837431430817,
"max": 0.48294416069984436,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1602110117673874,
"min": 0.14073972404003143,
"max": 3.3806090354919434,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 623.5510204081633,
"min": 522.1296296296297,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30554.0,
"min": 15984.0,
"max": 33253.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8048856811864036,
"min": -1.0000000521540642,
"max": 1.0820290599356999,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 39.439398378133774,
"min": -30.273201622068882,
"max": 63.79859830439091,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8048856811864036,
"min": -1.0000000521540642,
"max": 1.0820290599356999,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 39.439398378133774,
"min": -30.273201622068882,
"max": 63.79859830439091,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.07171139016638187,
"min": 0.06554952464454497,
"max": 9.08528567943722,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.5138581181527115,
"min": 3.3712199871079065,
"max": 145.36457087099552,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1711370830",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/home/tung491/env/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1711372944"
},
"total": 2114.196695728,
"count": 1,
"self": 0.3704457110002295,
"children": {
"run_training.setup": {
"total": 0.01752910300001531,
"count": 1,
"self": 0.01752910300001531
},
"TrainerController.start_learning": {
"total": 2113.808720914,
"count": 1,
"self": 1.3554383020195928,
"children": {
"TrainerController._reset_env": {
"total": 4.6445250249998935,
"count": 1,
"self": 4.6445250249998935
},
"TrainerController.advance": {
"total": 2107.7329056849803,
"count": 63458,
"self": 1.3103259979125141,
"children": {
"env_step": {
"total": 1253.326523833032,
"count": 63458,
"self": 1022.5726646679891,
"children": {
"SubprocessEnvManager._take_step": {
"total": 229.88777125997217,
"count": 63458,
"self": 4.940127104963494,
"children": {
"TorchPolicy.evaluate": {
"total": 224.94764415500867,
"count": 62567,
"self": 224.94764415500867
}
}
},
"workers": {
"total": 0.866087905070799,
"count": 63458,
"self": 0.0,
"children": {
"worker_root": {
"total": 2110.8223348129527,
"count": 63458,
"is_parallel": true,
"self": 1182.8103447539456,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020514999998795247,
"count": 1,
"is_parallel": true,
"self": 0.0011021000000255299,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009493999998539948,
"count": 8,
"is_parallel": true,
"self": 0.0009493999998539948
}
}
},
"UnityEnvironment.step": {
"total": 0.03209880500003237,
"count": 1,
"is_parallel": true,
"self": 0.0002471999996487284,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002344000001812674,
"count": 1,
"is_parallel": true,
"self": 0.0002344000001812674
},
"communicator.exchange": {
"total": 0.030760005000047386,
"count": 1,
"is_parallel": true,
"self": 0.030760005000047386
},
"steps_from_proto": {
"total": 0.0008572000001549895,
"count": 1,
"is_parallel": true,
"self": 0.00023239999995894323,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006248000001960463,
"count": 8,
"is_parallel": true,
"self": 0.0006248000001960463
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 928.0119900590071,
"count": 63457,
"is_parallel": true,
"self": 16.010251934043026,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.555226317939287,
"count": 63457,
"is_parallel": true,
"self": 12.555226317939287
},
"communicator.exchange": {
"total": 849.4656363850502,
"count": 63457,
"is_parallel": true,
"self": 849.4656363850502
},
"steps_from_proto": {
"total": 49.98087542197459,
"count": 63457,
"is_parallel": true,
"self": 12.850171524971756,
"children": {
"_process_rank_one_or_two_observation": {
"total": 37.13070389700283,
"count": 507656,
"is_parallel": true,
"self": 37.13070389700283
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 853.0960558540355,
"count": 63458,
"self": 2.607850197067819,
"children": {
"process_trajectory": {
"total": 127.66161222496635,
"count": 63458,
"self": 127.452523821966,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2090884030003508,
"count": 2,
"self": 0.2090884030003508
}
}
},
"_update_policy": {
"total": 722.8265934320013,
"count": 451,
"self": 296.4367549840033,
"children": {
"TorchPPOOptimizer.update": {
"total": 426.38983844799805,
"count": 22773,
"self": 426.38983844799805
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.000000318337698e-07,
"count": 1,
"self": 9.000000318337698e-07
},
"TrainerController._save_models": {
"total": 0.0758510020000358,
"count": 1,
"self": 0.0009024000000863452,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07494860199994946,
"count": 1,
"self": 0.07494860199994946
}
}
}
}
}
}
}