testpyramidsrnd / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.18549159169197083,
"min": 0.16501900553703308,
"max": 1.4877912998199463,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 5514.2939453125,
"min": 4918.88671875,
"max": 45133.63671875,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999986.0,
"min": 29952.0,
"max": 2999986.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999986.0,
"min": 29952.0,
"max": 2999986.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8491765260696411,
"min": -0.16802190244197845,
"max": 0.8910682797431946,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 258.149658203125,
"min": -39.821189880371094,
"max": 278.0133056640625,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.002368563786149025,
"min": -0.010633742436766624,
"max": 0.4445153772830963,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.720043420791626,
"min": -3.317727565765381,
"max": 105.35014343261719,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06774280974960198,
"min": 0.063515446266614,
"max": 0.07441755345303698,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9483993364944276,
"min": 0.5209228741712588,
"max": 1.0731008608903116,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015869215046959217,
"min": 0.00018658240380568724,
"max": 0.016393833552553718,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22216901065742906,
"min": 0.002425571249473934,
"max": 0.22951366973575205,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4967637868261916e-06,
"min": 1.4967637868261916e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.0954693015566682e-05,
"min": 2.0954693015566682e-05,
"max": 0.004011072962975699,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10049888809523812,
"min": 0.10049888809523812,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4069844333333337,
"min": 1.3962282666666668,
"max": 2.7374939000000005,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.983892071428576e-05,
"min": 5.983892071428576e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008377448900000006,
"min": 0.0008377448900000006,
"max": 0.13370872757000002,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006374638061970472,
"min": 0.0059983269311487675,
"max": 0.5016703009605408,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08924493193626404,
"min": 0.08397657424211502,
"max": 3.5116920471191406,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 210.52083333333334,
"min": 198.3046357615894,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30315.0,
"min": 15984.0,
"max": 32538.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7615020881061787,
"min": -1.0000000521540642,
"max": 1.7970890263988548,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 251.89479859918356,
"min": -29.996201567351818,
"max": 268.15519765019417,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7615020881061787,
"min": -1.0000000521540642,
"max": 1.7970890263988548,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 251.89479859918356,
"min": -29.996201567351818,
"max": 268.15519765019417,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.014017653583507851,
"min": 0.012781343650102726,
"max": 10.748835414648056,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.0045244624416227,
"min": 1.8405134856147924,
"max": 171.9813666343689,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1657570910",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1657577491"
},
"total": 6581.106113971001,
"count": 1,
"self": 0.4372175070011508,
"children": {
"run_training.setup": {
"total": 0.04052239399993596,
"count": 1,
"self": 0.04052239399993596
},
"TrainerController.start_learning": {
"total": 6580.62837407,
"count": 1,
"self": 4.034038596162645,
"children": {
"TrainerController._reset_env": {
"total": 9.61948098899984,
"count": 1,
"self": 9.61948098899984
},
"TrainerController.advance": {
"total": 6566.880816513838,
"count": 195480,
"self": 4.289930104000632,
"children": {
"env_step": {
"total": 4432.43372245399,
"count": 195480,
"self": 4121.725324592711,
"children": {
"SubprocessEnvManager._take_step": {
"total": 308.59892481496786,
"count": 195480,
"self": 13.548583634909392,
"children": {
"TorchPolicy.evaluate": {
"total": 295.05034118005847,
"count": 187549,
"self": 101.87077117030117,
"children": {
"TorchPolicy.sample_actions": {
"total": 193.1795700097573,
"count": 187549,
"self": 193.1795700097573
}
}
}
}
},
"workers": {
"total": 2.1094730463109954,
"count": 195480,
"self": 0.0,
"children": {
"worker_root": {
"total": 6569.793024105923,
"count": 195480,
"is_parallel": true,
"self": 2740.6610778758145,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005194442999936655,
"count": 1,
"is_parallel": true,
"self": 0.003949753999904715,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00124468900003194,
"count": 8,
"is_parallel": true,
"self": 0.00124468900003194
}
}
},
"UnityEnvironment.step": {
"total": 0.04604754000001776,
"count": 1,
"is_parallel": true,
"self": 0.0005065469999863126,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004288699999506207,
"count": 1,
"is_parallel": true,
"self": 0.0004288699999506207
},
"communicator.exchange": {
"total": 0.04345994899995276,
"count": 1,
"is_parallel": true,
"self": 0.04345994899995276
},
"steps_from_proto": {
"total": 0.0016521740001280705,
"count": 1,
"is_parallel": true,
"self": 0.00042566400020405126,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012265099999240192,
"count": 8,
"is_parallel": true,
"self": 0.0012265099999240192
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3829.1319462301085,
"count": 195479,
"is_parallel": true,
"self": 82.57355704394695,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 67.52475922283043,
"count": 195479,
"is_parallel": true,
"self": 67.52475922283043
},
"communicator.exchange": {
"total": 3404.435974938121,
"count": 195479,
"is_parallel": true,
"self": 3404.435974938121
},
"steps_from_proto": {
"total": 274.5976550252101,
"count": 195479,
"is_parallel": true,
"self": 67.90889252627176,
"children": {
"_process_rank_one_or_two_observation": {
"total": 206.68876249893833,
"count": 1563832,
"is_parallel": true,
"self": 206.68876249893833
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2130.157163955847,
"count": 195480,
"self": 7.822217081870349,
"children": {
"process_trajectory": {
"total": 495.59853101397266,
"count": 195480,
"self": 495.02613327397285,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5723977399998148,
"count": 6,
"self": 0.5723977399998148
}
}
},
"_update_policy": {
"total": 1626.736415860004,
"count": 1400,
"self": 643.8071453301063,
"children": {
"TorchPPOOptimizer.update": {
"total": 982.9292705298976,
"count": 68376,
"self": 982.9292705298976
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1539996194187552e-06,
"count": 1,
"self": 1.1539996194187552e-06
},
"TrainerController._save_models": {
"total": 0.09403681699950539,
"count": 1,
"self": 0.0017135369998868555,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09232327999961853,
"count": 1,
"self": 0.09232327999961853
}
}
}
}
}
}
}
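
For reference (not part of the logged data): a minimal sketch of how this file could be inspected offline, assuming it has been saved locally as timers.json (the path is hypothetical). The "gauges" block holds per-metric summaries (value/min/max/count), and the timer tree nests under "children", with "total" giving wall-clock seconds per node.

```python
# Minimal sketch, assuming the file above is saved locally as "timers.json".
import json

with open("timers.json") as f:
    timers = json.load(f)

# Each gauge stores value/min/max/count, e.g. Pyramids.Environment.CumulativeReward.mean.
for name, gauge in timers["gauges"].items():
    print(f"{name}: last={gauge['value']:.4f} "
          f"min={gauge['min']:.4f} max={gauge['max']:.4f} (n={gauge['count']})")

def walk(name, node, depth=0):
    # Timer nodes nest under "children"; "total" is wall-clock seconds spent in the node.
    print(f"{'  ' * depth}{name}: total={node.get('total', 0.0):.1f}s count={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk("root", timers)
```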