{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.463517963886261,
"min": 0.4533245265483856,
"max": 1.459008812904358,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13964.869140625,
"min": 13482.865234375,
"max": 44260.4921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989962.0,
"min": 29913.0,
"max": 989962.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989962.0,
"min": 29913.0,
"max": 989962.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4789871871471405,
"min": -0.10047843307256699,
"max": 0.5532966256141663,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 130.28451538085938,
"min": -24.315780639648438,
"max": 154.3697509765625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.009280841797590256,
"min": 0.00644303672015667,
"max": 0.23838819563388824,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.5243890285491943,
"min": 1.5592148303985596,
"max": 56.49800109863281,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0673857955701856,
"min": 0.06413194288205935,
"max": 0.07273836340700267,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0107869335527842,
"min": 0.5091685438490187,
"max": 1.0324496912082894,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01604025702884731,
"min": 0.0007724836082668743,
"max": 0.01743947066063324,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24060385543270965,
"min": 0.01081477051573624,
"max": 0.24564770540746392,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4426975191333305e-06,
"min": 7.4426975191333305e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011164046278699996,
"min": 0.00011164046278699996,
"max": 0.0037581388472870997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248086666666666,
"min": 0.10248086666666666,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.537213,
"min": 1.3886848,
"max": 2.6527129,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025783857999999994,
"min": 0.00025783857999999994,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038675786999999994,
"min": 0.0038675786999999994,
"max": 0.12528601871000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009184201247990131,
"min": 0.009184201247990131,
"max": 0.3884141743183136,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13776302337646484,
"min": 0.1351100504398346,
"max": 2.7188992500305176,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 387.5844155844156,
"min": 321.4782608695652,
"max": 997.6333333333333,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29844.0,
"min": 16840.0,
"max": 33254.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5344805014984948,
"min": -0.9316867190102737,
"max": 1.6132825920763223,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 118.1549986153841,
"min": -28.218201704323292,
"max": 148.42199847102165,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5344805014984948,
"min": -0.9316867190102737,
"max": 1.6132825920763223,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 118.1549986153841,
"min": -28.218201704323292,
"max": 148.42199847102165,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03672610307505427,
"min": 0.03217995077226948,
"max": 7.690991434104302,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8279099367791787,
"min": 2.8279099367791787,
"max": 130.74685437977314,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1720023473",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1720025766"
},
"total": 2293.159648346,
"count": 1,
"self": 0.5809694609997678,
"children": {
"run_training.setup": {
"total": 0.056248764999963896,
"count": 1,
"self": 0.056248764999963896
},
"TrainerController.start_learning": {
"total": 2292.52243012,
"count": 1,
"self": 1.5291125939579615,
"children": {
"TrainerController._reset_env": {
"total": 2.9597422669999105,
"count": 1,
"self": 2.9597422669999105
},
"TrainerController.advance": {
"total": 2287.938183929042,
"count": 63813,
"self": 1.520901688061258,
"children": {
"env_step": {
"total": 1607.1957797959828,
"count": 63813,
"self": 1467.2834005599707,
"children": {
"SubprocessEnvManager._take_step": {
"total": 138.98623399403778,
"count": 63813,
"self": 4.918276974048695,
"children": {
"TorchPolicy.evaluate": {
"total": 134.06795701998908,
"count": 62545,
"self": 134.06795701998908
}
}
},
"workers": {
"total": 0.9261452419743819,
"count": 63813,
"self": 0.0,
"children": {
"worker_root": {
"total": 2287.398150821039,
"count": 63813,
"is_parallel": true,
"self": 950.1177328320766,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021041119998699287,
"count": 1,
"is_parallel": true,
"self": 0.0006066869996175228,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014974250002524059,
"count": 8,
"is_parallel": true,
"self": 0.0014974250002524059
}
}
},
"UnityEnvironment.step": {
"total": 0.10678609499996128,
"count": 1,
"is_parallel": true,
"self": 0.0007274150000284862,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004757680001148401,
"count": 1,
"is_parallel": true,
"self": 0.0004757680001148401
},
"communicator.exchange": {
"total": 0.10391382099987823,
"count": 1,
"is_parallel": true,
"self": 0.10391382099987823
},
"steps_from_proto": {
"total": 0.0016690909999397263,
"count": 1,
"is_parallel": true,
"self": 0.00034679999998843414,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013222909999512922,
"count": 8,
"is_parallel": true,
"self": 0.0013222909999512922
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1337.2804179889624,
"count": 63812,
"is_parallel": true,
"self": 37.13434601986705,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.730874310005447,
"count": 63812,
"is_parallel": true,
"self": 24.730874310005447
},
"communicator.exchange": {
"total": 1172.2282801310153,
"count": 63812,
"is_parallel": true,
"self": 1172.2282801310153
},
"steps_from_proto": {
"total": 103.18691752807467,
"count": 63812,
"is_parallel": true,
"self": 21.32063098181493,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.86628654625974,
"count": 510496,
"is_parallel": true,
"self": 81.86628654625974
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 679.2215024449984,
"count": 63813,
"self": 2.915941159013528,
"children": {
"process_trajectory": {
"total": 136.53364377698404,
"count": 63813,
"self": 136.33482087498373,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19882290200030184,
"count": 2,
"self": 0.19882290200030184
}
}
},
"_update_policy": {
"total": 539.7719175090008,
"count": 458,
"self": 321.51204054895493,
"children": {
"TorchPPOOptimizer.update": {
"total": 218.25987696004586,
"count": 22761,
"self": 218.25987696004586
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.350001164420974e-07,
"count": 1,
"self": 9.350001164420974e-07
},
"TrainerController._save_models": {
"total": 0.09539039499986757,
"count": 1,
"self": 0.002154580000023998,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09323581499984357,
"count": 1,
"self": 0.09323581499984357
}
}
}
}
}
}
}