{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6854422092437744,
"min": 0.6854422092437744,
"max": 1.530413269996643,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 20519.3984375,
"min": 20519.3984375,
"max": 46426.6171875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989898.0,
"min": 29952.0,
"max": 989898.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989898.0,
"min": 29952.0,
"max": 989898.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.09317854791879654,
"min": -0.14085794985294342,
"max": 0.14089640974998474,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 23.108280181884766,
"min": -33.38333511352539,
"max": 34.94231033325195,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01638438180088997,
"min": 0.0034563359804451466,
"max": 0.18125468492507935,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.063326835632324,
"min": 0.8571712970733643,
"max": 42.957359313964844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06798235708109585,
"min": 0.06344787889387614,
"max": 0.07135744118086775,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0197353562164377,
"min": 0.4695197011032356,
"max": 1.0197353562164377,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.007676091034824103,
"min": 3.42937705489472e-05,
"max": 0.008420066784787022,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.11514136552236155,
"min": 0.00044581901713631355,
"max": 0.11788093498701832,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.508197497299997e-06,
"min": 7.508197497299997e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011262296245949996,
"min": 0.00011262296245949996,
"max": 0.0033819344726886006,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025027,
"min": 0.1025027,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5375405,
"min": 1.3691136000000002,
"max": 2.5273114000000008,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026001973000000007,
"min": 0.00026001973000000007,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0039002959500000008,
"min": 0.0039002959500000008,
"max": 0.11275840886000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008498873561620712,
"min": 0.008498873561620712,
"max": 0.2964560091495514,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12748309969902039,
"min": 0.11913242936134338,
"max": 2.0751919746398926,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 767.1282051282051,
"min": 712.175,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29918.0,
"min": 15984.0,
"max": 32181.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.2581435560415953,
"min": -1.0000000521540642,
"max": 0.4099435473863895,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 10.067598685622215,
"min": -32.000001668930054,
"max": 15.987798348069191,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.2581435560415953,
"min": -1.0000000521540642,
"max": 0.4099435473863895,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 10.067598685622215,
"min": -32.000001668930054,
"max": 15.987798348069191,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06777025429078211,
"min": 0.06360156870505307,
"max": 6.563605971634388,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.643039917340502,
"min": 2.5440627482021227,
"max": 105.01769554615021,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1730454492",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1730456684"
},
"total": 2191.806868478,
"count": 1,
"self": 0.6943519860001288,
"children": {
"run_training.setup": {
"total": 0.05929490500011525,
"count": 1,
"self": 0.05929490500011525
},
"TrainerController.start_learning": {
"total": 2191.053221587,
"count": 1,
"self": 1.408461138011262,
"children": {
"TrainerController._reset_env": {
"total": 2.4186169800000243,
"count": 1,
"self": 2.4186169800000243
},
"TrainerController.advance": {
"total": 2187.1391110479885,
"count": 63159,
"self": 1.4818728699406165,
"children": {
"env_step": {
"total": 1476.5598903710495,
"count": 63159,
"self": 1318.4552605490846,
"children": {
"SubprocessEnvManager._take_step": {
"total": 157.230744208975,
"count": 63159,
"self": 4.743012305939828,
"children": {
"TorchPolicy.evaluate": {
"total": 152.48773190303518,
"count": 62554,
"self": 152.48773190303518
}
}
},
"workers": {
"total": 0.8738856129898522,
"count": 63159,
"self": 0.0,
"children": {
"worker_root": {
"total": 2185.8627173759874,
"count": 63159,
"is_parallel": true,
"self": 987.238679166006,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020921770001223194,
"count": 1,
"is_parallel": true,
"self": 0.0006532299998980307,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014389470002242888,
"count": 8,
"is_parallel": true,
"self": 0.0014389470002242888
}
}
},
"UnityEnvironment.step": {
"total": 0.049834370000098716,
"count": 1,
"is_parallel": true,
"self": 0.0006961530002627114,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046789999987595365,
"count": 1,
"is_parallel": true,
"self": 0.00046789999987595365
},
"communicator.exchange": {
"total": 0.046971153999948,
"count": 1,
"is_parallel": true,
"self": 0.046971153999948
},
"steps_from_proto": {
"total": 0.0016991630000120495,
"count": 1,
"is_parallel": true,
"self": 0.00036070600026505417,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013384569997469953,
"count": 8,
"is_parallel": true,
"self": 0.0013384569997469953
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1198.6240382099813,
"count": 63158,
"is_parallel": true,
"self": 34.39747256710098,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.97356503496826,
"count": 63158,
"is_parallel": true,
"self": 22.97356503496826
},
"communicator.exchange": {
"total": 1042.3158710379942,
"count": 63158,
"is_parallel": true,
"self": 1042.3158710379942
},
"steps_from_proto": {
"total": 98.93712956991794,
"count": 63158,
"is_parallel": true,
"self": 20.33741738390836,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.59971218600958,
"count": 505264,
"is_parallel": true,
"self": 78.59971218600958
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 709.0973478069986,
"count": 63159,
"self": 2.5114280899911137,
"children": {
"process_trajectory": {
"total": 131.3474626840091,
"count": 63159,
"self": 131.10292807100882,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24453461300026902,
"count": 2,
"self": 0.24453461300026902
}
}
},
"_update_policy": {
"total": 575.2384570329984,
"count": 439,
"self": 322.7364709079891,
"children": {
"TorchPPOOptimizer.update": {
"total": 252.50198612500935,
"count": 22845,
"self": 252.50198612500935
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0439998732181266e-06,
"count": 1,
"self": 1.0439998732181266e-06
},
"TrainerController._save_models": {
"total": 0.0870313770001303,
"count": 1,
"self": 0.001499715000136348,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08553166199999396,
"count": 1,
"self": 0.08553166199999396
}
}
}
}
}
}
}