{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7424570322036743,
"min": 0.7424570322036743,
"max": 2.8728978633880615,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7260.4873046875,
"min": 7238.59619140625,
"max": 29421.34765625,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.564081192016602,
"min": 0.4222921133041382,
"max": 13.86706829071045,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2780.63671875,
"min": 81.92466735839844,
"max": 2828.8818359375,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06349361954969081,
"min": 0.06096216019382244,
"max": 0.07548993266605333,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.317468097748454,
"min": 0.24384864077528975,
"max": 0.37266896648547043,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17269703772138148,
"min": 0.13384080690779157,
"max": 0.2730473832172506,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8634851886069074,
"min": 0.5353632276311663,
"max": 1.3291282320723812,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.0528989824000028e-06,
"min": 3.0528989824000028e-06,
"max": 0.00029675280108239997,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5264494912000015e-05,
"min": 1.5264494912000015e-05,
"max": 0.001454064015312,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891759999999997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.41199040000000003,
"max": 0.984688,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.0778240000000044e-05,
"min": 6.0778240000000044e-05,
"max": 0.00494598824,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003038912000000002,
"min": 0.0003038912000000002,
"max": 0.0242359312,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.69090909090909,
"min": 3.3181818181818183,
"max": 27.272727272727273,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1468.0,
"min": 146.0,
"max": 1500.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.69090909090909,
"min": 3.3181818181818183,
"max": 27.272727272727273,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1468.0,
"min": 146.0,
"max": 1500.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689530628",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689531766"
},
"total": 1137.830814101,
"count": 1,
"self": 0.43652898799996365,
"children": {
"run_training.setup": {
"total": 0.04162648099998023,
"count": 1,
"self": 0.04162648099998023
},
"TrainerController.start_learning": {
"total": 1137.352658632,
"count": 1,
"self": 1.197892571983175,
"children": {
"TrainerController._reset_env": {
"total": 4.111410257999978,
"count": 1,
"self": 4.111410257999978
},
"TrainerController.advance": {
"total": 1131.897229610017,
"count": 45500,
"self": 0.5923195870141171,
"children": {
"env_step": {
"total": 1131.3049100230028,
"count": 45500,
"self": 825.0084598060165,
"children": {
"SubprocessEnvManager._take_step": {
"total": 305.6755188320143,
"count": 45500,
"self": 4.384614046994898,
"children": {
"TorchPolicy.evaluate": {
"total": 301.2909047850194,
"count": 45500,
"self": 301.2909047850194
}
}
},
"workers": {
"total": 0.620931384971982,
"count": 45500,
"self": 0.0,
"children": {
"worker_root": {
"total": 1133.8241881649878,
"count": 45500,
"is_parallel": true,
"self": 532.3521887960088,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006812465000052725,
"count": 1,
"is_parallel": true,
"self": 0.005123583000340659,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016888819997120663,
"count": 10,
"is_parallel": true,
"self": 0.0016888819997120663
}
}
},
"UnityEnvironment.step": {
"total": 0.04091799200000423,
"count": 1,
"is_parallel": true,
"self": 0.0009967490000235557,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043535099996461213,
"count": 1,
"is_parallel": true,
"self": 0.00043535099996461213
},
"communicator.exchange": {
"total": 0.03742620899993199,
"count": 1,
"is_parallel": true,
"self": 0.03742620899993199
},
"steps_from_proto": {
"total": 0.0020596830000840782,
"count": 1,
"is_parallel": true,
"self": 0.0003472610004564558,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017124219996276224,
"count": 10,
"is_parallel": true,
"self": 0.0017124219996276224
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 601.471999368979,
"count": 45499,
"is_parallel": true,
"self": 26.473422094982084,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.94251987898042,
"count": 45499,
"is_parallel": true,
"self": 12.94251987898042
},
"communicator.exchange": {
"total": 475.1824613870084,
"count": 45499,
"is_parallel": true,
"self": 475.1824613870084
},
"steps_from_proto": {
"total": 86.87359600800812,
"count": 45499,
"is_parallel": true,
"self": 15.480728640993448,
"children": {
"_process_rank_one_or_two_observation": {
"total": 71.39286736701467,
"count": 454990,
"is_parallel": true,
"self": 71.39286736701467
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.0205999994213926e-05,
"count": 1,
"self": 4.0205999994213926e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1123.2151634338734,
"count": 1050186,
"is_parallel": true,
"self": 23.101019192988588,
"children": {
"process_trajectory": {
"total": 610.8098345558825,
"count": 1050186,
"is_parallel": true,
"self": 608.2720062568826,
"children": {
"RLTrainer._checkpoint": {
"total": 2.537828298999898,
"count": 10,
"is_parallel": true,
"self": 2.537828298999898
}
}
},
"_update_policy": {
"total": 489.3043096850023,
"count": 227,
"is_parallel": true,
"self": 192.8302572620039,
"children": {
"TorchPPOOptimizer.update": {
"total": 296.4740524229984,
"count": 11574,
"is_parallel": true,
"self": 296.4740524229984
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1460859859998891,
"count": 1,
"self": 0.0009184329996969609,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14516755300019213,
"count": 1,
"self": 0.14516755300019213
}
}
}
}
}
}
}