ppo-Pyramids / run_logs / timers.json
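ML-Agents training gauges and profiler timers for the PPO Pyramids run: 33 summary snapshots over 989,977 environment steps, roughly 2,164 s (~36 min) of wall-clock time.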
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.41191014647483826,
"min": 0.410511314868927,
"max": 1.3573895692825317,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12265.0361328125,
"min": 12265.0361328125,
"max": 41177.76953125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989977.0,
"min": 29952.0,
"max": 989977.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989977.0,
"min": 29952.0,
"max": 989977.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5855107307434082,
"min": -0.13582341372966766,
"max": 0.5885475277900696,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 165.6995391845703,
"min": -32.190147399902344,
"max": 165.6995391845703,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.025272591039538383,
"min": -0.04805679991841316,
"max": 0.591279149055481,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.1521430015563965,
"min": -13.455904006958008,
"max": 140.13316345214844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06838421758757683,
"min": 0.06552355552202414,
"max": 0.0722737830309518,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9573790462260756,
"min": 0.4899935327308391,
"max": 1.0631852610180537,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014576204605483714,
"min": 0.00026563125680687454,
"max": 0.01567977282969628,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.204066864476772,
"min": 0.0031875750816824943,
"max": 0.2195168196157479,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.648468879114282e-06,
"min": 7.648468879114282e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010707856430759995,
"min": 0.00010707856430759995,
"max": 0.0034370688543104,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254945714285714,
"min": 0.10254945714285714,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356924,
"min": 1.3886848,
"max": 2.401491,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002646907685714285,
"min": 0.0002646907685714285,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003705670759999999,
"min": 0.003705670759999999,
"max": 0.11457439104000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01213882863521576,
"min": 0.01213882863521576,
"max": 0.580790638923645,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16994360089302063,
"min": 0.16994360089302063,
"max": 4.065534591674805,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 325.4719101123595,
"min": 317.7608695652174,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28967.0,
"min": 15984.0,
"max": 33343.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6070943621269773,
"min": -1.0000000521540642,
"max": 1.6387434588826222,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 143.03139822930098,
"min": -31.99480164051056,
"max": 150.76439821720123,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6070943621269773,
"min": -1.0000000521540642,
"max": 1.6387434588826222,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 143.03139822930098,
"min": -31.99480164051056,
"max": 150.76439821720123,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0409522373327368,
"min": 0.0407138881722779,
"max": 12.354949859902263,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.6447491226135753,
"min": 3.6447491226135753,
"max": 197.6791977584362,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704451162",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704453326"
},
"total": 2164.176653004,
"count": 1,
"self": 0.5393733010000687,
"children": {
"run_training.setup": {
"total": 0.045887376000109725,
"count": 1,
"self": 0.045887376000109725
},
"TrainerController.start_learning": {
"total": 2163.591392327,
"count": 1,
"self": 1.3315497310168212,
"children": {
"TrainerController._reset_env": {
"total": 2.3400908260000506,
"count": 1,
"self": 2.3400908260000506
},
"TrainerController.advance": {
"total": 2159.8362210999826,
"count": 63788,
"self": 1.441102931968544,
"children": {
"env_step": {
"total": 1551.81913347799,
"count": 63788,
"self": 1423.2697383639943,
"children": {
"SubprocessEnvManager._take_step": {
"total": 127.71271043499405,
"count": 63788,
"self": 4.685208758019826,
"children": {
"TorchPolicy.evaluate": {
"total": 123.02750167697423,
"count": 62553,
"self": 123.02750167697423
}
}
},
"workers": {
"total": 0.8366846790015643,
"count": 63788,
"self": 0.0,
"children": {
"worker_root": {
"total": 2158.9348992080454,
"count": 63788,
"is_parallel": true,
"self": 852.5070708969806,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018489200001567951,
"count": 1,
"is_parallel": true,
"self": 0.0006173270003273501,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001231592999829445,
"count": 8,
"is_parallel": true,
"self": 0.001231592999829445
}
}
},
"UnityEnvironment.step": {
"total": 0.07042400599993925,
"count": 1,
"is_parallel": true,
"self": 0.0006043890000455576,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004804279999461869,
"count": 1,
"is_parallel": true,
"self": 0.0004804279999461869
},
"communicator.exchange": {
"total": 0.06759693399999378,
"count": 1,
"is_parallel": true,
"self": 0.06759693399999378
},
"steps_from_proto": {
"total": 0.0017422549999537296,
"count": 1,
"is_parallel": true,
"self": 0.0003649279994988319,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013773270004548976,
"count": 8,
"is_parallel": true,
"self": 0.0013773270004548976
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1306.4278283110648,
"count": 63787,
"is_parallel": true,
"self": 34.751958534882306,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.196808810059565,
"count": 63787,
"is_parallel": true,
"self": 26.196808810059565
},
"communicator.exchange": {
"total": 1144.524449313097,
"count": 63787,
"is_parallel": true,
"self": 1144.524449313097
},
"steps_from_proto": {
"total": 100.95461165302595,
"count": 63787,
"is_parallel": true,
"self": 20.364912821812368,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.58969883121358,
"count": 510296,
"is_parallel": true,
"self": 80.58969883121358
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 606.5759846900244,
"count": 63788,
"self": 2.575056207084117,
"children": {
"process_trajectory": {
"total": 123.75336418394204,
"count": 63788,
"self": 123.56403019394224,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18933398999979545,
"count": 2,
"self": 0.18933398999979545
}
}
},
"_update_policy": {
"total": 480.2475642989982,
"count": 451,
"self": 281.9987276679815,
"children": {
"TorchPPOOptimizer.update": {
"total": 198.24883663101673,
"count": 22827,
"self": 198.24883663101673
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.9900002118665725e-06,
"count": 1,
"self": 2.9900002118665725e-06
},
"TrainerController._save_models": {
"total": 0.08352768000031574,
"count": 1,
"self": 0.001408433000506193,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08211924699980955,
"count": 1,
"self": 0.08211924699980955
}
}
}
}
}
}
}
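The timer tree above nests via "children"; the subtree under "worker_root" ran in a parallel worker process ("is_parallel": true), so its totals overlap the main-thread totals rather than adding to them. Below is a minimal, stdlib-only sketch of how one might load and summarize this report; the relative path is an assumption based on this file's location in the repo.

import json

# Load the ML-Agents timer report (path assumed from this file's repo location).
with open("run_logs/timers.json") as f:
    report = json.load(f)

# Each gauge stores the last recorded value plus min/max/count over the run.
for name, gauge in sorted(report["gauges"].items()):
    print(f"{name}: last={gauge['value']:.4g} "
          f"min={gauge['min']:.4g} max={gauge['max']:.4g}")

# The timer tree nests via "children"; walk it to see where the
# ~2164 s of wall-clock time went.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.2f}s "
          f"({node.get('count', 0)} calls)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(report)

For this run, the walk makes the bottleneck plain: of the ~2164 s total, about 1144 s sat in communicator.exchange (stepping the Unity executable) versus about 480 s in _update_policy, so the run was environment-bound rather than optimizer-bound.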