{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5535457134246826,
"min": 0.5535457134246826,
"max": 1.4745616912841797,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16526.66015625,
"min": 16526.66015625,
"max": 44732.3046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989944.0,
"min": 29952.0,
"max": 989944.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989944.0,
"min": 29952.0,
"max": 989944.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.35023078322410583,
"min": -0.1144992932677269,
"max": 0.35023078322410583,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 91.76046752929688,
"min": -27.594329833984375,
"max": 91.76046752929688,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0004190222534816712,
"min": -0.0004190222534816712,
"max": 0.35449734330177307,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.1097838282585144,
"min": -0.1097838282585144,
"max": 84.015869140625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06860575317807074,
"min": 0.06259277691509958,
"max": 0.07322617392851107,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9604805444929904,
"min": 0.4982663270587171,
"max": 1.0897080612268444,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011853492382215334,
"min": 0.0005287973702003898,
"max": 0.012316144136493474,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16594889335101468,
"min": 0.005043216571224569,
"max": 0.17890678551852676,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.25268329675714e-06,
"min": 7.25268329675714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010153756615459997,
"min": 0.00010153756615459997,
"max": 0.0030057732980757006,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10241752857142858,
"min": 0.10241752857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4338454,
"min": 1.3691136000000002,
"max": 2.4019243,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002515111042857143,
"min": 0.0002515111042857143,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035211554599999997,
"min": 0.0035211554599999997,
"max": 0.10023223757,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010278454050421715,
"min": 0.010278454050421715,
"max": 0.48938798904418945,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1438983529806137,
"min": 0.1438983529806137,
"max": 3.425715923309326,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 511.271186440678,
"min": 502.51666666666665,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30165.0,
"min": 15984.0,
"max": 33702.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3191728679052854,
"min": -1.0000000521540642,
"max": 1.3641166374087335,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 77.83119920641184,
"min": -32.000001668930054,
"max": 81.846998244524,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3191728679052854,
"min": -1.0000000521540642,
"max": 1.3641166374087335,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 77.83119920641184,
"min": -32.000001668930054,
"max": 81.846998244524,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05487600586799336,
"min": 0.05487600586799336,
"max": 10.391977360472083,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2376843462116085,
"min": 3.2376843462116085,
"max": 166.27163776755333,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703175014",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703177211"
},
"total": 2197.851266036,
"count": 1,
"self": 0.47599694500013356,
"children": {
"run_training.setup": {
"total": 0.07093509300011647,
"count": 1,
"self": 0.07093509300011647
},
"TrainerController.start_learning": {
"total": 2197.304333998,
"count": 1,
"self": 1.4537739760344266,
"children": {
"TrainerController._reset_env": {
"total": 2.232617993999611,
"count": 1,
"self": 2.232617993999611
},
"TrainerController.advance": {
"total": 2193.5318336249657,
"count": 63403,
"self": 1.468862188919502,
"children": {
"env_step": {
"total": 1566.016204508092,
"count": 63403,
"self": 1433.603659619218,
"children": {
"SubprocessEnvManager._take_step": {
"total": 131.5500175119828,
"count": 63403,
"self": 4.853619912929844,
"children": {
"TorchPolicy.evaluate": {
"total": 126.69639759905294,
"count": 62577,
"self": 126.69639759905294
}
}
},
"workers": {
"total": 0.8625273768911939,
"count": 63403,
"self": 0.0,
"children": {
"worker_root": {
"total": 2192.056339243994,
"count": 63403,
"is_parallel": true,
"self": 880.8666079998761,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016709649999029352,
"count": 1,
"is_parallel": true,
"self": 0.0005023979997531569,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011685670001497783,
"count": 8,
"is_parallel": true,
"self": 0.0011685670001497783
}
}
},
"UnityEnvironment.step": {
"total": 0.052397217999896384,
"count": 1,
"is_parallel": true,
"self": 0.0006090150004638417,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005454329998428875,
"count": 1,
"is_parallel": true,
"self": 0.0005454329998428875
},
"communicator.exchange": {
"total": 0.049471731999801705,
"count": 1,
"is_parallel": true,
"self": 0.049471731999801705
},
"steps_from_proto": {
"total": 0.00177103799978795,
"count": 1,
"is_parallel": true,
"self": 0.00037757399968540994,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00139346400010254,
"count": 8,
"is_parallel": true,
"self": 0.00139346400010254
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1311.1897312441179,
"count": 63402,
"is_parallel": true,
"self": 35.278884253183605,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.138991849048125,
"count": 63402,
"is_parallel": true,
"self": 25.138991849048125
},
"communicator.exchange": {
"total": 1149.2807449889265,
"count": 63402,
"is_parallel": true,
"self": 1149.2807449889265
},
"steps_from_proto": {
"total": 101.49111015295966,
"count": 63402,
"is_parallel": true,
"self": 20.78277449207644,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.70833566088322,
"count": 507216,
"is_parallel": true,
"self": 80.70833566088322
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 626.0467669279542,
"count": 63403,
"self": 2.6185832319724796,
"children": {
"process_trajectory": {
"total": 126.86745678298394,
"count": 63403,
"self": 126.62077571198415,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24668107099978442,
"count": 2,
"self": 0.24668107099978442
}
}
},
"_update_policy": {
"total": 496.5607269129978,
"count": 433,
"self": 296.4529802809984,
"children": {
"TorchPPOOptimizer.update": {
"total": 200.10774663199936,
"count": 22806,
"self": 200.10774663199936
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0140001904801466e-06,
"count": 1,
"self": 1.0140001904801466e-06
},
"TrainerController._save_models": {
"total": 0.08610738900006254,
"count": 1,
"self": 0.0014629239994974341,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0846444650005651,
"count": 1,
"self": 0.0846444650005651
}
}
}
}
}
}
}