ppo-PyramidsRND/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3535670340061188,
"min": 0.34077608585357666,
"max": 1.3484305143356323,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10590.0400390625,
"min": 10228.7353515625,
"max": 40905.98828125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989896.0,
"min": 29952.0,
"max": 989896.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989896.0,
"min": 29952.0,
"max": 989896.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.48649317026138306,
"min": -0.09194578975439072,
"max": 0.524970293045044,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 132.81263732910156,
"min": -22.158935546875,
"max": 141.74197387695312,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0035755091812461615,
"min": 0.0035755091812461615,
"max": 0.3392353653907776,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.97611403465271,
"min": 0.97611403465271,
"max": 82.09495544433594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06897587805274052,
"min": 0.06615012882181384,
"max": 0.07381867904889065,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9656622927383673,
"min": 0.48773620794855066,
"max": 1.0509990786764924,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014946910136911485,
"min": 0.000682130938356236,
"max": 0.014946910136911485,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20925674191676077,
"min": 0.008867702198631068,
"max": 0.22313221352912171,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4269189529642895e-06,
"min": 7.4269189529642895e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010397686534150006,
"min": 0.00010397686534150006,
"max": 0.0035087858304048,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247560714285717,
"min": 0.10247560714285717,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346585000000003,
"min": 1.3886848,
"max": 2.5695952,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025731315357142875,
"min": 0.00025731315357142875,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003602384150000002,
"min": 0.003602384150000002,
"max": 0.11698256048,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01400444470345974,
"min": 0.013761998154222965,
"max": 0.584896445274353,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19606222212314606,
"min": 0.19266797602176666,
"max": 4.094274997711182,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 387.0253164556962,
"min": 352.2674418604651,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30575.0,
"min": 15984.0,
"max": 33974.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5116708599313904,
"min": -1.0000000521540642,
"max": 1.5695290971971765,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 119.42199793457985,
"min": -29.10040158033371,
"max": 135.50539788603783,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5116708599313904,
"min": -1.0000000521540642,
"max": 1.5695290971971765,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 119.42199793457985,
"min": -29.10040158033371,
"max": 135.50539788603783,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05562157640336789,
"min": 0.05060062655490894,
"max": 12.438773028552532,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.394104535866063,
"min": 3.997449497837806,
"max": 199.02036845684052,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680953839",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680957391"
},
"total": 3551.503820808999,
"count": 1,
"self": 0.6361241759987024,
"children": {
"run_training.setup": {
"total": 0.14982996599974285,
"count": 1,
"self": 0.14982996599974285
},
"TrainerController.start_learning": {
"total": 3550.7178666670006,
"count": 1,
"self": 2.7316940108339622,
"children": {
"TrainerController._reset_env": {
"total": 1.6410269979996883,
"count": 1,
"self": 1.6410269979996883
},
"TrainerController.advance": {
"total": 3546.198045609167,
"count": 63766,
"self": 3.110299505943658,
"children": {
"env_step": {
"total": 2348.814827719065,
"count": 63766,
"self": 2197.453305610319,
"children": {
"SubprocessEnvManager._take_step": {
"total": 149.67374686485255,
"count": 63766,
"self": 7.317843172706489,
"children": {
"TorchPolicy.evaluate": {
"total": 142.35590369214606,
"count": 62559,
"self": 142.35590369214606
}
}
},
"workers": {
"total": 1.6877752438931566,
"count": 63766,
"self": 0.0,
"children": {
"worker_root": {
"total": 3542.939803263921,
"count": 63766,
"is_parallel": true,
"self": 1532.953221414883,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004387333999147813,
"count": 1,
"is_parallel": true,
"self": 0.0013288409991218941,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003058493000025919,
"count": 8,
"is_parallel": true,
"self": 0.003058493000025919
}
}
},
"UnityEnvironment.step": {
"total": 0.1513788679994832,
"count": 1,
"is_parallel": true,
"self": 0.0010368029979872517,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000664497000798292,
"count": 1,
"is_parallel": true,
"self": 0.000664497000798292
},
"communicator.exchange": {
"total": 0.1475030270003117,
"count": 1,
"is_parallel": true,
"self": 0.1475030270003117
},
"steps_from_proto": {
"total": 0.0021745410003859433,
"count": 1,
"is_parallel": true,
"self": 0.00047666000136814546,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016978809990177979,
"count": 8,
"is_parallel": true,
"self": 0.0016978809990177979
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2009.986581849038,
"count": 63765,
"is_parallel": true,
"self": 47.4927526133788,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.146782983776575,
"count": 63765,
"is_parallel": true,
"self": 28.146782983776575
},
"communicator.exchange": {
"total": 1795.283179810207,
"count": 63765,
"is_parallel": true,
"self": 1795.283179810207
},
"steps_from_proto": {
"total": 139.0638664416756,
"count": 63765,
"is_parallel": true,
"self": 32.22027491843983,
"children": {
"_process_rank_one_or_two_observation": {
"total": 106.84359152323577,
"count": 510120,
"is_parallel": true,
"self": 106.84359152323577
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1194.2729183841584,
"count": 63766,
"self": 5.4089528384574805,
"children": {
"process_trajectory": {
"total": 150.66396160870408,
"count": 63766,
"self": 150.41522442770565,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24873718099843245,
"count": 2,
"self": 0.24873718099843245
}
}
},
"_update_policy": {
"total": 1038.2000039369968,
"count": 456,
"self": 423.2363656109601,
"children": {
"TorchPPOOptimizer.update": {
"total": 614.9636383260367,
"count": 22812,
"self": 614.9636383260367
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5850000636419281e-06,
"count": 1,
"self": 1.5850000636419281e-06
},
"TrainerController._save_models": {
"total": 0.1470984640000097,
"count": 1,
"self": 0.0019834670001728227,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14511499699983688,
"count": 1,
"self": 0.14511499699983688
}
}
}
}
}
}
}