{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2624301016330719,
"min": 0.2503872513771057,
"max": 1.415424108505249,
"count": 66
},
"Pyramids.Policy.Entropy.sum": {
"value": 7809.91943359375,
"min": 7479.56787109375,
"max": 42938.3046875,
"count": 66
},
"Pyramids.Step.mean": {
"value": 1979972.0,
"min": 29952.0,
"max": 1979972.0,
"count": 66
},
"Pyramids.Step.sum": {
"value": 1979972.0,
"min": 29952.0,
"max": 1979972.0,
"count": 66
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5534326434135437,
"min": -0.09447427839040756,
"max": 0.609072744846344,
"count": 66
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 150.53367614746094,
"min": -22.768301010131836,
"max": 171.75851440429688,
"count": 66
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007547972723841667,
"min": -0.015686098486185074,
"max": 0.4602912664413452,
"count": 66
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.053048610687256,
"min": -4.282304763793945,
"max": 109.08902740478516,
"count": 66
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06894014026203026,
"min": 0.06445410102605818,
"max": 0.07318553161236367,
"count": 66
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9651619636684236,
"min": 0.5114105983249891,
"max": 1.071707970722962,
"count": 66
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011589847809582275,
"min": 0.00014715042579319283,
"max": 0.01403903107573223,
"count": 66
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16225786933415184,
"min": 0.0019129555353115067,
"max": 0.19952770731013553,
"count": 66
},
"Pyramids.Policy.LearningRate.mean": {
"value": 5.163312564642856e-06,
"min": 5.163312564642856e-06,
"max": 0.0002975753150939428,
"count": 66
},
"Pyramids.Policy.LearningRate.sum": {
"value": 7.228637590499999e-05,
"min": 7.228637590499999e-05,
"max": 0.003916468144510649,
"count": 66
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10172107142857142,
"min": 0.10172107142857142,
"max": 0.19919177142857142,
"count": 66
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4240949999999999,
"min": 1.3943424,
"max": 2.7054893500000006,
"count": 66
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00018193503571428573,
"min": 0.00018193503571428573,
"max": 0.009919257965714285,
"count": 66
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0025470905000000003,
"min": 0.0025470905000000003,
"max": 0.13055838606499998,
"count": 66
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005576462019234896,
"min": 0.005030772648751736,
"max": 0.49330100417137146,
"count": 66
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.07807046920061111,
"min": 0.07110690325498581,
"max": 3.4531071186065674,
"count": 66
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 380.4024390243902,
"min": 315.3333333333333,
"max": 999.0,
"count": 66
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31193.0,
"min": 15984.0,
"max": 33271.0,
"count": 66
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4766770876854298,
"min": -1.0000000521540642,
"max": 1.6416451381419295,
"count": 66
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 122.56419827789068,
"min": -31.994401648640633,
"max": 152.67299784719944,
"count": 66
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4766770876854298,
"min": -1.0000000521540642,
"max": 1.6416451381419295,
"count": 66
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 122.56419827789068,
"min": -31.994401648640633,
"max": 152.67299784719944,
"count": 66
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.021517857894825985,
"min": 0.018689367788022212,
"max": 9.230703588109463,
"count": 66
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.7859822052705567,
"min": 1.5916997588647064,
"max": 147.69125740975142,
"count": 66
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 66
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 66
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684967100",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684971533"
},
"total": 4432.418219368999,
"count": 1,
"self": 0.8425378719994114,
"children": {
"run_training.setup": {
"total": 0.06338283100001263,
"count": 1,
"self": 0.06338283100001263
},
"TrainerController.start_learning": {
"total": 4431.512298666,
"count": 1,
"self": 2.7566301501283306,
"children": {
"TrainerController._reset_env": {
"total": 4.453283148999958,
"count": 1,
"self": 4.453283148999958
},
"TrainerController.advance": {
"total": 4424.144755398872,
"count": 128216,
"self": 2.9100664849256646,
"children": {
"env_step": {
"total": 3110.9200945618422,
"count": 128216,
"self": 2885.296031356897,
"children": {
"SubprocessEnvManager._take_step": {
"total": 223.90028392495663,
"count": 128216,
"self": 9.632010484039938,
"children": {
"TorchPolicy.evaluate": {
"total": 214.2682734409167,
"count": 125062,
"self": 214.2682734409167
}
}
},
"workers": {
"total": 1.723779279988662,
"count": 128216,
"self": 0.0,
"children": {
"worker_root": {
"total": 4421.25423290699,
"count": 128216,
"is_parallel": true,
"self": 1768.1496972099885,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005590783000002375,
"count": 1,
"is_parallel": true,
"self": 0.003631979999909163,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001958803000093212,
"count": 8,
"is_parallel": true,
"self": 0.001958803000093212
}
}
},
"UnityEnvironment.step": {
"total": 0.05145241700006409,
"count": 1,
"is_parallel": true,
"self": 0.0005446720000463756,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004964710000194827,
"count": 1,
"is_parallel": true,
"self": 0.0004964710000194827
},
"communicator.exchange": {
"total": 0.04862403599997833,
"count": 1,
"is_parallel": true,
"self": 0.04862403599997833
},
"steps_from_proto": {
"total": 0.0017872380000198973,
"count": 1,
"is_parallel": true,
"self": 0.000406599000029928,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013806389999899693,
"count": 8,
"is_parallel": true,
"self": 0.0013806389999899693
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2653.1045356970017,
"count": 128215,
"is_parallel": true,
"self": 65.44456140778811,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 45.39854862909476,
"count": 128215,
"is_parallel": true,
"self": 45.39854862909476
},
"communicator.exchange": {
"total": 2344.497528541059,
"count": 128215,
"is_parallel": true,
"self": 2344.497528541059
},
"steps_from_proto": {
"total": 197.7638971190596,
"count": 128215,
"is_parallel": true,
"self": 40.730027855887556,
"children": {
"_process_rank_one_or_two_observation": {
"total": 157.03386926317205,
"count": 1025720,
"is_parallel": true,
"self": 157.03386926317205
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1310.3145943521045,
"count": 128216,
"self": 5.543658799083914,
"children": {
"process_trajectory": {
"total": 221.8015003650222,
"count": 128216,
"self": 221.3202100970219,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4812902680002935,
"count": 4,
"self": 0.4812902680002935
}
}
},
"_update_policy": {
"total": 1082.9694351879982,
"count": 923,
"self": 696.4650409379615,
"children": {
"TorchPPOOptimizer.update": {
"total": 386.50439425003674,
"count": 45549,
"self": 386.50439425003674
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.247000000148546e-06,
"count": 1,
"self": 1.247000000148546e-06
},
"TrainerController._save_models": {
"total": 0.15762872100003733,
"count": 1,
"self": 0.00207904899980349,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15554967200023384,
"count": 1,
"self": 0.15554967200023384
}
}
}
}
}
}
}