{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8525972962379456,
"min": 0.7685958743095398,
"max": 1.4148558378219604,
"count": 6
},
"Pyramids.Policy.Entropy.sum": {
"value": 25687.05078125,
"min": 23107.06640625,
"max": 42921.06640625,
"count": 6
},
"Pyramids.Step.mean": {
"value": 179976.0,
"min": 29999.0,
"max": 179976.0,
"count": 6
},
"Pyramids.Step.sum": {
"value": 179976.0,
"min": 29999.0,
"max": 179976.0,
"count": 6
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.06478046625852585,
"min": -0.17248746752738953,
"max": -0.06478046625852585,
"count": 6
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -15.676873207092285,
"min": -41.05201721191406,
"max": -15.676873207092285,
"count": 6
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.08444657921791077,
"min": 0.08444657921791077,
"max": 0.3411673903465271,
"count": 6
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 20.436071395874023,
"min": 20.436071395874023,
"max": 81.88017272949219,
"count": 6
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06848841694629206,
"min": 0.06645446432290963,
"max": 0.07442661318584726,
"count": 6
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9588378372480887,
"min": 0.595412905486778,
"max": 0.9588378372480887,
"count": 6
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0022097700208396237,
"min": 0.00028507633900457706,
"max": 0.010728728211535882,
"count": 6
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.03093678029175473,
"min": 0.0031358397290503475,
"max": 0.08582982569228706,
"count": 6
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.0002835495769120475,
"min": 0.0002835495769120475,
"max": 0.00029840696303101244,
"count": 6
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.003969694076768665,
"min": 0.0023872557042480995,
"max": 0.003969694076768665,
"count": 6
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.19451652380952383,
"min": 0.19451652380952383,
"max": 0.19946898750000003,
"count": 6
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.7232313333333336,
"min": 1.5957519000000002,
"max": 2.7232313333333336,
"count": 6
},
"Pyramids.Policy.Beta.mean": {
"value": 0.009452200728571428,
"min": 0.009452200728571428,
"max": 0.00994695185125,
"count": 6
},
"Pyramids.Policy.Beta.sum": {
"value": 0.13233081019999998,
"min": 0.07957561481,
"max": 0.13233081019999998,
"count": 6
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.06705036759376526,
"min": 0.06705036759376526,
"max": 0.6905897259712219,
"count": 6
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.9387051463127136,
"min": 0.9387051463127136,
"max": 5.524717807769775,
"count": 6
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 943.4516129032259,
"min": 943.4516129032259,
"max": 999.0,
"count": 6
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29247.0,
"min": 15903.0,
"max": 32265.0,
"count": 6
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.6216258548440472,
"min": -0.999987552408129,
"max": -0.5888727819829276,
"count": 6
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -19.270401500165462,
"min": -31.999601677060127,
"max": -13.918000780045986,
"count": 6
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.6216258548440472,
"min": -0.999987552408129,
"max": -0.5888727819829276,
"count": 6
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -19.270401500165462,
"min": -31.999601677060127,
"max": -13.918000780045986,
"count": 6
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.6561870046500717,
"min": 0.6561870046500717,
"max": 13.471240378916264,
"count": 6
},
"Pyramids.Policy.RndReward.sum": {
"value": 20.341797144152224,
"min": 20.341797144152224,
"max": 215.53984606266022,
"count": 6
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 6
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 6
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1734345721",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1734346158"
},
"total": 437.57680742,
"count": 1,
"self": 0.4788514070000929,
"children": {
"run_training.setup": {
"total": 0.055545781000091665,
"count": 1,
"self": 0.055545781000091665
},
"TrainerController.start_learning": {
"total": 437.04241023199984,
"count": 1,
"self": 0.3046039039959396,
"children": {
"TrainerController._reset_env": {
"total": 2.066780240999833,
"count": 1,
"self": 2.066780240999833
},
"TrainerController.advance": {
"total": 434.34583015300404,
"count": 12589,
"self": 0.31472872200356505,
"children": {
"env_step": {
"total": 294.3329148910093,
"count": 12589,
"self": 260.373176297001,
"children": {
"SubprocessEnvManager._take_step": {
"total": 33.77240578397914,
"count": 12589,
"self": 0.9903168230011943,
"children": {
"TorchPolicy.evaluate": {
"total": 32.782088960977944,
"count": 12546,
"self": 32.782088960977944
}
}
},
"workers": {
"total": 0.18733281002914737,
"count": 12588,
"self": 0.0,
"children": {
"worker_root": {
"total": 435.69762289705704,
"count": 12588,
"is_parallel": true,
"self": 199.80945987904624,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0024325839999619348,
"count": 1,
"is_parallel": true,
"self": 0.0009537169989926042,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014788670009693305,
"count": 8,
"is_parallel": true,
"self": 0.0014788670009693305
}
}
},
"UnityEnvironment.step": {
"total": 0.050896321999971406,
"count": 1,
"is_parallel": true,
"self": 0.0005599350001830317,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005067689999123104,
"count": 1,
"is_parallel": true,
"self": 0.0005067689999123104
},
"communicator.exchange": {
"total": 0.04808272299987948,
"count": 1,
"is_parallel": true,
"self": 0.04808272299987948
},
"steps_from_proto": {
"total": 0.0017468949999965844,
"count": 1,
"is_parallel": true,
"self": 0.0004280309999558085,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013188640000407759,
"count": 8,
"is_parallel": true,
"self": 0.0013188640000407759
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 235.8881630180108,
"count": 12587,
"is_parallel": true,
"self": 6.59218486409236,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.654961781996462,
"count": 12587,
"is_parallel": true,
"self": 4.654961781996462
},
"communicator.exchange": {
"total": 204.33458181894184,
"count": 12587,
"is_parallel": true,
"self": 204.33458181894184
},
"steps_from_proto": {
"total": 20.306434552980136,
"count": 12587,
"is_parallel": true,
"self": 4.244187505000809,
"children": {
"_process_rank_one_or_two_observation": {
"total": 16.062247047979326,
"count": 100696,
"is_parallel": true,
"self": 16.062247047979326
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 139.69818653999118,
"count": 12588,
"self": 0.4824562690005223,
"children": {
"process_trajectory": {
"total": 24.855625302993303,
"count": 12588,
"self": 24.855625302993303
},
"_update_policy": {
"total": 114.36010496799736,
"count": 76,
"self": 63.804899489011405,
"children": {
"TorchPPOOptimizer.update": {
"total": 50.555205478985954,
"count": 4593,
"self": 50.555205478985954
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.690000317466911e-06,
"count": 1,
"self": 1.690000317466911e-06
},
"TrainerController._save_models": {
"total": 0.32519424399970376,
"count": 1,
"self": 0.003326080000078946,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3218681639996248,
"count": 1,
"self": 0.3218681639996248
}
}
}
}
}
}
}