{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.40762531757354736,
"min": 0.3326530158519745,
"max": 1.58945631980896,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4134.951171875,
"min": 3267.983154296875,
"max": 16276.0322265625,
"count": 100
},
"Pyramids.Step.mean": {
"value": 999991.0,
"min": 9984.0,
"max": 999991.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 999991.0,
"min": 9984.0,
"max": 999991.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5587826371192932,
"min": -0.3401697278022766,
"max": 0.7835901379585266,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 51.40800094604492,
"min": -26.533239364624023,
"max": 78.35901641845703,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.019848734140396118,
"min": -0.02832069806754589,
"max": 0.30685386061668396,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.8260835409164429,
"min": -2.6621456146240234,
"max": 24.855161666870117,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07367817766013711,
"min": 0.059008057058478405,
"max": 0.0816483677143429,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.29471271064054844,
"min": 0.12658978240890964,
"max": 0.37343313286934665,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015939953426520027,
"min": 0.0001965325012812244,
"max": 0.020706950046587735,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.06375981370608011,
"min": 0.0006076172915830587,
"max": 0.10353475023293868,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5350494883499983e-06,
"min": 1.5350494883499983e-06,
"max": 0.0002981568006144,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 6.140197953399993e-06,
"min": 6.140197953399993e-06,
"max": 0.0013131327622890997,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10051165000000001,
"min": 0.10051165000000001,
"max": 0.1993856,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.40204660000000003,
"min": 0.385152,
"max": 0.9377109000000001,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.111383499999997e-05,
"min": 6.111383499999997e-05,
"max": 0.00993862144,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00024445533999999986,
"min": 0.00024445533999999986,
"max": 0.04377731891000001,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011335862800478935,
"min": 0.011335862800478935,
"max": 0.5757608413696289,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.04534345120191574,
"min": 0.04534345120191574,
"max": 1.1515216827392578,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 399.3333333333333,
"min": 236.0487804878049,
"max": 999.0,
"count": 97
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 11980.0,
"min": 471.0,
"max": 15984.0,
"count": 97
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4706689322303081,
"min": -1.0000000521540642,
"max": 1.7639512005375653,
"count": 97
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 42.649399034678936,
"min": -16.000000834465027,
"max": 75.09999920427799,
"count": 97
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4706689322303081,
"min": -1.0000000521540642,
"max": 1.7639512005375653,
"count": 97
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 42.649399034678936,
"min": -16.000000834465027,
"max": 75.09999920427799,
"count": 97
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04590241284037394,
"min": 0.031273852029343976,
"max": 6.192862309515476,
"count": 97
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.3311699723708443,
"min": 0.879071524093888,
"max": 99.08579695224762,
"count": 97
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676471432",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/home/jamesup/Documents/source/deep-rl-class/env/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./envs/Pyramids/Pyramids --run-id=PyramidsRND --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cu116",
"numpy_version": "1.23.3",
"end_time_seconds": "1676473864"
},
"total": 2432.8907376410207,
"count": 1,
"self": 0.42925374303013086,
"children": {
"run_training.setup": {
"total": 0.061697887955233455,
"count": 1,
"self": 0.061697887955233455
},
"TrainerController.start_learning": {
"total": 2432.3997860100353,
"count": 1,
"self": 1.2320784685434774,
"children": {
"TrainerController._reset_env": {
"total": 2.91117098799441,
"count": 1,
"self": 2.91117098799441
},
"TrainerController.advance": {
"total": 2428.1994277925114,
"count": 64183,
"self": 1.0450610908446833,
"children": {
"env_step": {
"total": 1466.2239785917918,
"count": 64183,
"self": 1177.333488754637,
"children": {
"SubprocessEnvManager._take_step": {
"total": 288.13038323266665,
"count": 64183,
"self": 3.2908897219458595,
"children": {
"TorchPolicy.evaluate": {
"total": 284.8394935107208,
"count": 62565,
"self": 203.39305339194834,
"children": {
"TorchPolicy.sample_actions": {
"total": 81.44644011877244,
"count": 62565,
"self": 81.44644011877244
}
}
}
}
},
"workers": {
"total": 0.7601066044881009,
"count": 64183,
"self": 0.0,
"children": {
"worker_root": {
"total": 2430.045870592585,
"count": 64183,
"is_parallel": true,
"self": 1349.298764889536,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0014870360027998686,
"count": 1,
"is_parallel": true,
"self": 0.00046307407319545746,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010239619296044111,
"count": 8,
"is_parallel": true,
"self": 0.0010239619296044111
}
}
},
"UnityEnvironment.step": {
"total": 0.03899501799605787,
"count": 1,
"is_parallel": true,
"self": 0.0004992769681848586,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037968100514262915,
"count": 1,
"is_parallel": true,
"self": 0.00037968100514262915
},
"communicator.exchange": {
"total": 0.03674200898967683,
"count": 1,
"is_parallel": true,
"self": 0.03674200898967683
},
"steps_from_proto": {
"total": 0.0013740510330535471,
"count": 1,
"is_parallel": true,
"self": 0.0003387421602383256,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010353088728152215,
"count": 8,
"is_parallel": true,
"self": 0.0010353088728152215
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1080.7471057030489,
"count": 64182,
"is_parallel": true,
"self": 35.62455430190312,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.778342747536954,
"count": 64182,
"is_parallel": true,
"self": 24.778342747536954
},
"communicator.exchange": {
"total": 930.2868377896957,
"count": 64182,
"is_parallel": true,
"self": 930.2868377896957
},
"steps_from_proto": {
"total": 90.05737086391309,
"count": 64182,
"is_parallel": true,
"self": 19.627064865257125,
"children": {
"_process_rank_one_or_two_observation": {
"total": 70.43030599865597,
"count": 513456,
"is_parallel": true,
"self": 70.43030599865597
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 960.9303881098749,
"count": 64183,
"self": 2.3728120194282383,
"children": {
"process_trajectory": {
"total": 185.53242368251085,
"count": 64183,
"self": 185.26967672648607,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2627469560247846,
"count": 2,
"self": 0.2627469560247846
}
}
},
"_update_policy": {
"total": 773.0251524079358,
"count": 443,
"self": 147.64772564132,
"children": {
"TorchPPOOptimizer.update": {
"total": 625.3774267666158,
"count": 22788,
"self": 625.3774267666158
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.799826562404633e-07,
"count": 1,
"self": 7.799826562404633e-07
},
"TrainerController._save_models": {
"total": 0.057107981003355235,
"count": 1,
"self": 0.0014307640376500785,
"children": {
"RLTrainer._checkpoint": {
"total": 0.055677216965705156,
"count": 1,
"self": 0.055677216965705156
}
}
}
}
}
}
}