{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5094846487045288,
"min": 0.5094846487045288,
"max": 1.4570775032043457,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15341.6015625,
"min": 15341.6015625,
"max": 44201.90234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989939.0,
"min": 29952.0,
"max": 989939.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989939.0,
"min": 29952.0,
"max": 989939.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5156944990158081,
"min": -0.15757042169570923,
"max": 0.5156944990158081,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 145.42584228515625,
"min": -37.34418869018555,
"max": 145.42584228515625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.003609584178775549,
"min": -0.0006210150895640254,
"max": 0.5131192207336426,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.0179027318954468,
"min": -0.1620849370956421,
"max": 121.6092529296875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06556671617796557,
"min": 0.06400260483523454,
"max": 0.07456115647485786,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9179340264915179,
"min": 0.4751735468439881,
"max": 1.0674046649661182,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.018296625322407285,
"min": 0.0008263428757292196,
"max": 0.018296625322407285,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.256152754513702,
"min": 0.00739814496305946,
"max": 0.256152754513702,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.64141888146428e-06,
"min": 7.64141888146428e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010697986434049992,
"min": 0.00010697986434049992,
"max": 0.0033692731769089995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254710714285717,
"min": 0.10254710714285717,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356595000000003,
"min": 1.3691136000000002,
"max": 2.4431171000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002644560035714284,
"min": 0.0002644560035714284,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003702384049999998,
"min": 0.003702384049999998,
"max": 0.11232679090000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011432580649852753,
"min": 0.01111969817429781,
"max": 0.4702349007129669,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16005612909793854,
"min": 0.1556757688522339,
"max": 3.291644334793091,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 330.58620689655174,
"min": 330.58620689655174,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28761.0,
"min": 15984.0,
"max": 34103.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6024613425304943,
"min": -1.0000000521540642,
"max": 1.6166351157668475,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 141.0165981426835,
"min": -32.000001668930054,
"max": 141.0165981426835,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6024613425304943,
"min": -1.0000000521540642,
"max": 1.6166351157668475,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 141.0165981426835,
"min": -32.000001668930054,
"max": 141.0165981426835,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03843109232490282,
"min": 0.03843109232490282,
"max": 9.85769286006689,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.381936124591448,
"min": 3.381936124591448,
"max": 157.72308576107025,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676238899",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids_Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676240694"
},
"total": 1794.9430570180002,
"count": 1,
"self": 0.3218009960000927,
"children": {
"run_training.setup": {
"total": 0.10915768399991066,
"count": 1,
"self": 0.10915768399991066
},
"TrainerController.start_learning": {
"total": 1794.5120983380002,
"count": 1,
"self": 1.5843584409585674,
"children": {
"TrainerController._reset_env": {
"total": 6.229163668999718,
"count": 1,
"self": 6.229163668999718
},
"TrainerController.advance": {
"total": 1786.6124840720413,
"count": 63720,
"self": 1.6391485379999722,
"children": {
"env_step": {
"total": 1117.8290255530164,
"count": 63720,
"self": 995.0660462619908,
"children": {
"SubprocessEnvManager._take_step": {
"total": 121.76696294900466,
"count": 63720,
"self": 4.87352758907582,
"children": {
"TorchPolicy.evaluate": {
"total": 116.89343535992884,
"count": 62556,
"self": 39.33466064587992,
"children": {
"TorchPolicy.sample_actions": {
"total": 77.55877471404892,
"count": 62556,
"self": 77.55877471404892
}
}
}
}
},
"workers": {
"total": 0.9960163420209938,
"count": 63720,
"self": 0.0,
"children": {
"worker_root": {
"total": 1791.5606121159944,
"count": 63720,
"is_parallel": true,
"self": 905.833252963931,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020701080002254457,
"count": 1,
"is_parallel": true,
"self": 0.0008284250006909133,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012416829995345324,
"count": 8,
"is_parallel": true,
"self": 0.0012416829995345324
}
}
},
"UnityEnvironment.step": {
"total": 0.042839331000323,
"count": 1,
"is_parallel": true,
"self": 0.0003263219996370026,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002887999999074964,
"count": 1,
"is_parallel": true,
"self": 0.0002887999999074964
},
"communicator.exchange": {
"total": 0.04115106400058721,
"count": 1,
"is_parallel": true,
"self": 0.04115106400058721
},
"steps_from_proto": {
"total": 0.001073145000191289,
"count": 1,
"is_parallel": true,
"self": 0.00028733600083796773,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007858089993533213,
"count": 8,
"is_parallel": true,
"self": 0.0007858089993533213
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 885.7273591520634,
"count": 63719,
"is_parallel": true,
"self": 20.995677524985695,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 14.822081047969732,
"count": 63719,
"is_parallel": true,
"self": 14.822081047969732
},
"communicator.exchange": {
"total": 755.8924224140237,
"count": 63719,
"is_parallel": true,
"self": 755.8924224140237
},
"steps_from_proto": {
"total": 94.01717816508426,
"count": 63719,
"is_parallel": true,
"self": 18.23550575493573,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.78167241014853,
"count": 509752,
"is_parallel": true,
"self": 75.78167241014853
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 667.1443099810249,
"count": 63720,
"self": 2.9107771908993527,
"children": {
"process_trajectory": {
"total": 150.73072742213026,
"count": 63720,
"self": 150.5488990481308,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1818283739994513,
"count": 2,
"self": 0.1818283739994513
}
}
},
"_update_policy": {
"total": 513.5028053679953,
"count": 444,
"self": 185.93091509493115,
"children": {
"TorchPPOOptimizer.update": {
"total": 327.5718902730641,
"count": 22821,
"self": 327.5718902730641
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.430004865862429e-07,
"count": 1,
"self": 9.430004865862429e-07
},
"TrainerController._save_models": {
"total": 0.08609121300014522,
"count": 1,
"self": 0.0014486299996860907,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08464258300045913,
"count": 1,
"self": 0.08464258300045913
}
}
}
}
}
}
}