{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3739411234855652,
"min": 0.3739411234855652,
"max": 1.5047718286514282,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11200.2841796875,
"min": 11200.2841796875,
"max": 45648.7578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989928.0,
"min": 29952.0,
"max": 989928.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989928.0,
"min": 29952.0,
"max": 989928.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6031209230422974,
"min": -0.10538581758737564,
"max": 0.6545706391334534,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 170.08010864257812,
"min": -25.397981643676758,
"max": 183.27978515625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007571254391223192,
"min": -0.021212035790085793,
"max": 0.4121011197566986,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.1350936889648438,
"min": -5.812098026275635,
"max": 97.66796875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06601344463286093,
"min": 0.06590305171786943,
"max": 0.07301525055195324,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.990201669492914,
"min": 0.5111067538636727,
"max": 1.033990620885971,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015789575056017687,
"min": 0.0003668796064034191,
"max": 0.016519466483476294,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23684362584026533,
"min": 0.00403567567043761,
"max": 0.23684362584026533,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.469217510293336e-06,
"min": 7.469217510293336e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011203826265440003,
"min": 0.00011203826265440003,
"max": 0.0031444331518556993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248970666666667,
"min": 0.10248970666666667,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373456,
"min": 1.3886848,
"max": 2.4428719,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025872169600000004,
"min": 0.00025872169600000004,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003880825440000001,
"min": 0.003880825440000001,
"max": 0.10483961556999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01223546639084816,
"min": 0.01223546639084816,
"max": 0.399586945772171,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1835319995880127,
"min": 0.17160239815711975,
"max": 2.7971086502075195,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 337.42857142857144,
"min": 307.8695652173913,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30706.0,
"min": 15984.0,
"max": 33042.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6185666473375426,
"min": -1.0000000521540642,
"max": 1.6921304154979147,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 145.67099826037884,
"min": -31.99760165810585,
"max": 162.71319769322872,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6185666473375426,
"min": -1.0000000521540642,
"max": 1.6921304154979147,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 145.67099826037884,
"min": -31.99760165810585,
"max": 162.71319769322872,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.042794016491906305,
"min": 0.03934783413063522,
"max": 8.62995847966522,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8514614842715673,
"min": 3.6200007400184404,
"max": 138.07933567464352,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684301152",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684303342"
},
"total": 2189.97745116,
"count": 1,
"self": 0.78913496999985,
"children": {
"run_training.setup": {
"total": 0.03847691000009945,
"count": 1,
"self": 0.03847691000009945
},
"TrainerController.start_learning": {
"total": 2189.14983928,
"count": 1,
"self": 1.407952127954104,
"children": {
"TrainerController._reset_env": {
"total": 3.7742672700001094,
"count": 1,
"self": 3.7742672700001094
},
"TrainerController.advance": {
"total": 2183.824237406046,
"count": 63930,
"self": 1.372887170903141,
"children": {
"env_step": {
"total": 1562.2141009810357,
"count": 63930,
"self": 1455.0789518310191,
"children": {
"SubprocessEnvManager._take_step": {
"total": 106.31793980702514,
"count": 63930,
"self": 4.631091964018879,
"children": {
"TorchPolicy.evaluate": {
"total": 101.68684784300626,
"count": 62570,
"self": 101.68684784300626
}
}
},
"workers": {
"total": 0.8172093429914185,
"count": 63930,
"self": 0.0,
"children": {
"worker_root": {
"total": 2184.095313795957,
"count": 63930,
"is_parallel": true,
"self": 839.8432322699473,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017222149999724934,
"count": 1,
"is_parallel": true,
"self": 0.0005202659997394221,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012019490002330713,
"count": 8,
"is_parallel": true,
"self": 0.0012019490002330713
}
}
},
"UnityEnvironment.step": {
"total": 0.04694044899997607,
"count": 1,
"is_parallel": true,
"self": 0.0005485209997004858,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004552560001229722,
"count": 1,
"is_parallel": true,
"self": 0.0004552560001229722
},
"communicator.exchange": {
"total": 0.04408987600004366,
"count": 1,
"is_parallel": true,
"self": 0.04408987600004366
},
"steps_from_proto": {
"total": 0.0018467960001089523,
"count": 1,
"is_parallel": true,
"self": 0.0004018269996777235,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014449690004312288,
"count": 8,
"is_parallel": true,
"self": 0.0014449690004312288
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1344.2520815260095,
"count": 63929,
"is_parallel": true,
"self": 32.79690609611566,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.227647016948822,
"count": 63929,
"is_parallel": true,
"self": 22.227647016948822
},
"communicator.exchange": {
"total": 1189.3575596429969,
"count": 63929,
"is_parallel": true,
"self": 1189.3575596429969
},
"steps_from_proto": {
"total": 99.86996876994817,
"count": 63929,
"is_parallel": true,
"self": 19.788308064037437,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.08166070591074,
"count": 511432,
"is_parallel": true,
"self": 80.08166070591074
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 620.2372492541072,
"count": 63930,
"self": 2.5969638241110715,
"children": {
"process_trajectory": {
"total": 105.65010969499258,
"count": 63930,
"self": 105.38112108899236,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2689886060002209,
"count": 2,
"self": 0.2689886060002209
}
}
},
"_update_policy": {
"total": 511.9901757350035,
"count": 442,
"self": 330.19687346101364,
"children": {
"TorchPPOOptimizer.update": {
"total": 181.79330227398987,
"count": 22827,
"self": 181.79330227398987
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.234999672306003e-06,
"count": 1,
"self": 1.234999672306003e-06
},
"TrainerController._save_models": {
"total": 0.1433812410000428,
"count": 1,
"self": 0.0018343860001550638,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14154685499988773,
"count": 1,
"self": 0.14154685499988773
}
}
}
}
}
}
}
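
The gauges above can also be read back programmatically. Below is a minimal sketch in Python; the path `run_logs/timers.json` is an assumption about where this file is saved (ML-Agents writes it under the run's results directory), so adjust it to wherever this file actually lives.

```python
import json

# Assumption: this JSON has been saved as run_logs/timers.json.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge keeps the latest value plus a running min/max/count.
reward = timers["gauges"]["Pyramids.Environment.CumulativeReward.mean"]
print(f"final mean reward: {reward['value']:.3f} "
      f"(min {reward['min']:.3f}, max {reward['max']:.3f}, count {reward['count']})")

# Total wall-clock time of the run, in seconds.
print(f"total training time: {timers['total']:.1f} s")
```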