{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2701921761035919,
"min": 0.26977261900901794,
"max": 1.342741847038269,
"count": 62
},
"Pyramids.Policy.Entropy.sum": {
"value": 8174.9345703125,
"min": 8071.5966796875,
"max": 40733.41796875,
"count": 62
},
"Pyramids.Step.mean": {
"value": 1859988.0,
"min": 29952.0,
"max": 1859988.0,
"count": 62
},
"Pyramids.Step.sum": {
"value": 1859988.0,
"min": 29952.0,
"max": 1859988.0,
"count": 62
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6743399500846863,
"min": -0.10293690860271454,
"max": 0.7813393473625183,
"count": 62
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 190.8382110595703,
"min": -24.80779457092285,
"max": 229.71377563476562,
"count": 62
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.010546749457716942,
"min": -0.02537567913532257,
"max": 0.5655151605606079,
"count": 62
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.9847300052642822,
"min": -6.978311538696289,
"max": 134.027099609375,
"count": 62
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06926958905560675,
"min": 0.06541073590397857,
"max": 0.07398475692918068,
"count": 62
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9697742467784944,
"min": 0.5138031122483441,
"max": 1.0930324257596844,
"count": 62
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013869414471195168,
"min": 7.39460481411504e-05,
"max": 0.015861346733499402,
"count": 62
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19417180259673236,
"min": 0.0010352446739761054,
"max": 0.22539597191458666,
"count": 62
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00011558203290124762,
"min": 0.00011558203290124762,
"max": 0.00029838354339596195,
"count": 62
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0016181484606174667,
"min": 0.0016181484606174667,
"max": 0.004117555127481666,
"count": 62
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.13852732380952382,
"min": 0.13852732380952382,
"max": 0.19946118095238097,
"count": 62
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.9393825333333334,
"min": 1.3962282666666668,
"max": 2.8725183333333333,
"count": 62
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0038588796485714287,
"min": 0.0038588796485714287,
"max": 0.009946171977142856,
"count": 62
},
"Pyramids.Policy.Beta.sum": {
"value": 0.05402431508,
"min": 0.05402431508,
"max": 0.1372645815,
"count": 62
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.004745361395180225,
"min": 0.004745361395180225,
"max": 0.48182225227355957,
"count": 62
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.0664350613951683,
"min": 0.0664350613951683,
"max": 3.372755765914917,
"count": 62
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 294.90909090909093,
"min": 238.54545454545453,
"max": 999.0,
"count": 62
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29196.0,
"min": 15984.0,
"max": 32943.0,
"count": 62
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.644464625102101,
"min": -1.0000000521540642,
"max": 1.7505528300516005,
"count": 62
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 162.801997885108,
"min": -30.99880162626505,
"max": 215.31799809634686,
"count": 62
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.644464625102101,
"min": -1.0000000521540642,
"max": 1.7505528300516005,
"count": 62
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 162.801997885108,
"min": -30.99880162626505,
"max": 215.31799809634686,
"count": 62
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.014565808352963725,
"min": 0.013426007143239113,
"max": 10.341735620982945,
"count": 62
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.4420150269434089,
"min": 1.4420150269434089,
"max": 165.46776993572712,
"count": 62
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 62
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 62
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701149615",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701156332"
},
"total": 6716.385251668,
"count": 1,
"self": 0.5753307689992653,
"children": {
"run_training.setup": {
"total": 0.06343696700014334,
"count": 1,
"self": 0.06343696700014334
},
"TrainerController.start_learning": {
"total": 6715.746483932,
"count": 1,
"self": 4.68890463304615,
"children": {
"TrainerController._reset_env": {
"total": 2.810407146999978,
"count": 1,
"self": 2.810407146999978
},
"TrainerController.advance": {
"total": 6707.932427557955,
"count": 119611,
"self": 4.952084283920158,
"children": {
"env_step": {
"total": 4519.511307172119,
"count": 119611,
"self": 4196.15482896635,
"children": {
"SubprocessEnvManager._take_step": {
"total": 320.45771707092877,
"count": 119611,
"self": 14.175374426915141,
"children": {
"TorchPolicy.evaluate": {
"total": 306.2823426440136,
"count": 116385,
"self": 306.2823426440136
}
}
},
"workers": {
"total": 2.898761134840015,
"count": 119610,
"self": 0.0,
"children": {
"worker_root": {
"total": 6702.855181220978,
"count": 119610,
"is_parallel": true,
"self": 2854.209973548025,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0027925460001370084,
"count": 1,
"is_parallel": true,
"self": 0.000987173000112307,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018053730000247015,
"count": 8,
"is_parallel": true,
"self": 0.0018053730000247015
}
}
},
"UnityEnvironment.step": {
"total": 0.06999408999990919,
"count": 1,
"is_parallel": true,
"self": 0.0006811949999701028,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005803039998681925,
"count": 1,
"is_parallel": true,
"self": 0.0005803039998681925
},
"communicator.exchange": {
"total": 0.0665962479999962,
"count": 1,
"is_parallel": true,
"self": 0.0665962479999962
},
"steps_from_proto": {
"total": 0.002136343000074703,
"count": 1,
"is_parallel": true,
"self": 0.00047315000006165064,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016631930000130524,
"count": 8,
"is_parallel": true,
"self": 0.0016631930000130524
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3848.645207672953,
"count": 119609,
"is_parallel": true,
"self": 88.9310091739726,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 54.6570962079079,
"count": 119609,
"is_parallel": true,
"self": 54.6570962079079
},
"communicator.exchange": {
"total": 3458.489453091087,
"count": 119609,
"is_parallel": true,
"self": 3458.489453091087
},
"steps_from_proto": {
"total": 246.56764919998568,
"count": 119609,
"is_parallel": true,
"self": 53.64709878953158,
"children": {
"_process_rank_one_or_two_observation": {
"total": 192.9205504104541,
"count": 956872,
"is_parallel": true,
"self": 192.9205504104541
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2183.4690361019157,
"count": 119610,
"self": 9.115496366744082,
"children": {
"process_trajectory": {
"total": 332.01113409218306,
"count": 119610,
"self": 331.7326709091835,
"children": {
"RLTrainer._checkpoint": {
"total": 0.27846318299953055,
"count": 3,
"self": 0.27846318299953055
}
}
},
"_update_policy": {
"total": 1842.3424056429885,
"count": 857,
"self": 742.2272737299331,
"children": {
"TorchPPOOptimizer.update": {
"total": 1100.1151319130554,
"count": 42411,
"self": 1100.1151319130554
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.481999788666144e-06,
"count": 1,
"self": 1.481999788666144e-06
},
"TrainerController._save_models": {
"total": 0.3147431119996327,
"count": 1,
"self": 0.009042426999258169,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3057006850003745,
"count": 1,
"self": 0.3057006850003745
}
}
}
}
}
}
}