{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.17980921268463135,
"min": 0.17980921268463135,
"max": 1.4908032417297363,
"count": 68
},
"Pyramids.Policy.Entropy.sum": {
"value": 5397.1533203125,
"min": 5397.1533203125,
"max": 45225.0078125,
"count": 68
},
"Pyramids.Step.mean": {
"value": 2039938.0,
"min": 29952.0,
"max": 2039938.0,
"count": 68
},
"Pyramids.Step.sum": {
"value": 2039938.0,
"min": 29952.0,
"max": 2039938.0,
"count": 68
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8179959654808044,
"min": -0.10204073786735535,
"max": 0.8437695503234863,
"count": 68
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 247.0347900390625,
"min": -24.489776611328125,
"max": 253.2827911376953,
"count": 68
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.008990243077278137,
"min": -0.005866355262696743,
"max": 0.23719657957553864,
"count": 68
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.7150533199310303,
"min": -1.7833720445632935,
"max": 56.92717742919922,
"count": 68
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06704078006270506,
"min": 0.0645019569540532,
"max": 0.07243784507621616,
"count": 68
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9385709208778708,
"min": 0.49196040106939964,
"max": 1.073367096163565,
"count": 68
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01307627064134653,
"min": 0.00015572338776235944,
"max": 0.017449374461735022,
"count": 68
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18306778897885143,
"min": 0.0017129572653859539,
"max": 0.2442912424642903,
"count": 68
},
"Pyramids.Policy.LearningRate.mean": {
"value": 9.748766750413332e-05,
"min": 9.748766750413332e-05,
"max": 0.00029838354339596195,
"count": 68
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0013648273450578665,
"min": 0.0013648273450578665,
"max": 0.0037595075468308662,
"count": 68
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.13249586666666666,
"min": 0.13249586666666666,
"max": 0.19946118095238097,
"count": 68
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.8549421333333331,
"min": 1.3962282666666668,
"max": 2.737527966666667,
"count": 68
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0032563370800000003,
"min": 0.0032563370800000003,
"max": 0.009946171977142856,
"count": 68
},
"Pyramids.Policy.Beta.sum": {
"value": 0.04558871912,
"min": 0.04558871912,
"max": 0.12533159642,
"count": 68
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007266219239681959,
"min": 0.00682816794142127,
"max": 0.38205215334892273,
"count": 68
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10172706842422485,
"min": 0.09559435397386551,
"max": 2.6743650436401367,
"count": 68
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 244.416,
"min": 217.78195488721803,
"max": 999.0,
"count": 68
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30552.0,
"min": 15984.0,
"max": 32746.0,
"count": 68
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7395823949575424,
"min": -1.0000000521540642,
"max": 1.7822180306328868,
"count": 68
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 217.4477993696928,
"min": -31.999601677060127,
"max": 237.03499807417393,
"count": 68
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7395823949575424,
"min": -1.0000000521540642,
"max": 1.7822180306328868,
"count": 68
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 217.4477993696928,
"min": -31.999601677060127,
"max": 237.03499807417393,
"count": 68
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.018703219723756773,
"min": 0.015945801764260102,
"max": 7.945512444712222,
"count": 68
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.337902465469597,
"min": 2.07823581318371,
"max": 127.12819911539555,
"count": 68
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 68
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 68
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1736590425",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1736595685"
},
"total": 5259.843416490001,
"count": 1,
"self": 0.3866337450008359,
"children": {
"run_training.setup": {
"total": 0.05692282600011822,
"count": 1,
"self": 0.05692282600011822
},
"TrainerController.start_learning": {
"total": 5259.399859919,
"count": 1,
"self": 3.2217839600052685,
"children": {
"TrainerController._reset_env": {
"total": 2.18508397100004,
"count": 1,
"self": 2.18508397100004
},
"TrainerController.advance": {
"total": 5253.857490259995,
"count": 133187,
"self": 3.3552704809671923,
"children": {
"env_step": {
"total": 3793.8598952431466,
"count": 133187,
"self": 3462.0814241241583,
"children": {
"SubprocessEnvManager._take_step": {
"total": 329.9098882581161,
"count": 133187,
"self": 10.118870759019046,
"children": {
"TorchPolicy.evaluate": {
"total": 319.79101749909705,
"count": 128800,
"self": 319.79101749909705
}
}
},
"workers": {
"total": 1.8685828608722659,
"count": 133186,
"self": 0.0,
"children": {
"worker_root": {
"total": 5248.689558098246,
"count": 133186,
"is_parallel": true,
"self": 2045.0334780562785,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021429349999380065,
"count": 1,
"is_parallel": true,
"self": 0.00070007899967095,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014428560002670565,
"count": 8,
"is_parallel": true,
"self": 0.0014428560002670565
}
}
},
"UnityEnvironment.step": {
"total": 0.05158772899994801,
"count": 1,
"is_parallel": true,
"self": 0.0006383649997587781,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005218189999141032,
"count": 1,
"is_parallel": true,
"self": 0.0005218189999141032
},
"communicator.exchange": {
"total": 0.0486770739998974,
"count": 1,
"is_parallel": true,
"self": 0.0486770739998974
},
"steps_from_proto": {
"total": 0.001750471000377729,
"count": 1,
"is_parallel": true,
"self": 0.00037436999946294236,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013761010009147867,
"count": 8,
"is_parallel": true,
"self": 0.0013761010009147867
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3203.656080041968,
"count": 133185,
"is_parallel": true,
"self": 71.36936587535683,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 49.997467591988425,
"count": 133185,
"is_parallel": true,
"self": 49.997467591988425
},
"communicator.exchange": {
"total": 2865.762534146872,
"count": 133185,
"is_parallel": true,
"self": 2865.762534146872
},
"steps_from_proto": {
"total": 216.52671242775068,
"count": 133185,
"is_parallel": true,
"self": 45.29428247979649,
"children": {
"_process_rank_one_or_two_observation": {
"total": 171.2324299479542,
"count": 1065480,
"is_parallel": true,
"self": 171.2324299479542
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1456.6423245358815,
"count": 133186,
"self": 5.897573797948098,
"children": {
"process_trajectory": {
"total": 293.17314737793413,
"count": 133186,
"self": 292.7749743799336,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3981729980005184,
"count": 4,
"self": 0.3981729980005184
}
}
},
"_update_policy": {
"total": 1157.5716033599992,
"count": 942,
"self": 650.7413175971578,
"children": {
"TorchPPOOptimizer.update": {
"total": 506.8302857628414,
"count": 46998,
"self": 506.8302857628414
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5240002539940178e-06,
"count": 1,
"self": 1.5240002539940178e-06
},
"TrainerController._save_models": {
"total": 0.13550020399998175,
"count": 1,
"self": 0.002136614999471931,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13336358900050982,
"count": 1,
"self": 0.13336358900050982
}
}
}
}
}
}
}