{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8923442363739014,
"min": 0.8923442363739014,
"max": 2.8903186321258545,
"count": 200
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9158.12890625,
"min": 8653.87890625,
"max": 29695.134765625,
"count": 200
},
"SnowballTarget.Step.mean": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Step.sum": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.374176025390625,
"min": -0.00461520254611969,
"max": 14.383991241455078,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2946.7060546875,
"min": -0.8953492641448975,
"max": 2948.71826171875,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.527272727272727,
"min": 2.522727272727273,
"max": 28.654545454545456,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1569.0,
"min": 111.0,
"max": 1576.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.527272727272727,
"min": 2.522727272727273,
"max": 28.654545454545456,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1569.0,
"min": 111.0,
"max": 1576.0,
"count": 200
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06086980885779345,
"min": 0.058823424854147564,
"max": 0.07065732356816419,
"count": 97
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.06086980885779345,
"min": 0.058823424854147564,
"max": 0.07065732356816419,
"count": 97
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1482402557563601,
"min": 0.08072170176313427,
"max": 0.2968664024062831,
"count": 97
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.1482402557563601,
"min": 0.08072170176313427,
"max": 0.2968664024062831,
"count": 97
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.244499585200012e-06,
"min": 1.244499585200012e-06,
"max": 0.00029692440102520003,
"count": 97
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.244499585200012e-06,
"min": 1.244499585200012e-06,
"max": 0.00029692440102520003,
"count": 97
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10041480000000001,
"min": 0.10041480000000001,
"max": 0.1989748,
"count": 97
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10041480000000001,
"min": 0.10041480000000001,
"max": 0.1989748,
"count": 97
},
"SnowballTarget.Policy.Beta.mean": {
"value": 3.06985200000002e-05,
"min": 3.06985200000002e-05,
"max": 0.004948842519999999,
"count": 97
},
"SnowballTarget.Policy.Beta.sum": {
"value": 3.06985200000002e-05,
"min": 3.06985200000002e-05,
"max": 0.004948842519999999,
"count": 97
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685387356",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685392104"
},
"total": 4747.500766848,
"count": 1,
"self": 0.48841547000029095,
"children": {
"run_training.setup": {
"total": 0.04340411699990909,
"count": 1,
"self": 0.04340411699990909
},
"TrainerController.start_learning": {
"total": 4746.968947261,
"count": 1,
"self": 5.114859669081852,
"children": {
"TrainerController._reset_env": {
"total": 3.991836987999932,
"count": 1,
"self": 3.991836987999932
},
"TrainerController.advance": {
"total": 4737.725261504917,
"count": 181872,
"self": 2.62865422302184,
"children": {
"env_step": {
"total": 4735.096607281896,
"count": 181872,
"self": 3594.658866705777,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1137.8589556881113,
"count": 181872,
"self": 21.233470379049777,
"children": {
"TorchPolicy.evaluate": {
"total": 1116.6254853090616,
"count": 181872,
"self": 1116.6254853090616
}
}
},
"workers": {
"total": 2.5787848880070214,
"count": 181872,
"self": 0.0,
"children": {
"worker_root": {
"total": 4733.518990897057,
"count": 181872,
"is_parallel": true,
"self": 2290.3763285820314,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005748607000214179,
"count": 1,
"is_parallel": true,
"self": 0.004062546000113798,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016860610001003806,
"count": 10,
"is_parallel": true,
"self": 0.0016860610001003806
}
}
},
"UnityEnvironment.step": {
"total": 0.03698387500003264,
"count": 1,
"is_parallel": true,
"self": 0.0006701680001697241,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00032914300004449615,
"count": 1,
"is_parallel": true,
"self": 0.00032914300004449615
},
"communicator.exchange": {
"total": 0.033594622999999046,
"count": 1,
"is_parallel": true,
"self": 0.033594622999999046
},
"steps_from_proto": {
"total": 0.0023899409998193732,
"count": 1,
"is_parallel": true,
"self": 0.00040843199963092047,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019815090001884528,
"count": 10,
"is_parallel": true,
"self": 0.0019815090001884528
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2443.1426623150255,
"count": 181871,
"is_parallel": true,
"self": 105.44505381493855,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 52.89960010097252,
"count": 181871,
"is_parallel": true,
"self": 52.89960010097252
},
"communicator.exchange": {
"total": 1934.1572663770094,
"count": 181871,
"is_parallel": true,
"self": 1934.1572663770094
},
"steps_from_proto": {
"total": 350.6407420221051,
"count": 181871,
"is_parallel": true,
"self": 63.7596964997465,
"children": {
"_process_rank_one_or_two_observation": {
"total": 286.8810455223586,
"count": 1818710,
"is_parallel": true,
"self": 286.8810455223586
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00033434100078011397,
"count": 1,
"self": 0.00033434100078011397,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 4678.9930856826795,
"count": 6279853,
"is_parallel": true,
"self": 152.8643107338885,
"children": {
"process_trajectory": {
"total": 3473.1756873387912,
"count": 6279853,
"is_parallel": true,
"self": 3462.034507245793,
"children": {
"RLTrainer._checkpoint": {
"total": 11.141180092997956,
"count": 40,
"is_parallel": true,
"self": 11.141180092997956
}
}
},
"_update_policy": {
"total": 1052.9530876099998,
"count": 97,
"is_parallel": true,
"self": 584.6118025519165,
"children": {
"TorchPPOOptimizer.update": {
"total": 468.34128505808326,
"count": 38217,
"is_parallel": true,
"self": 468.34128505808326
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13665475800007698,
"count": 1,
"self": 0.0008705879999979516,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13578417000007903,
"count": 1,
"self": 0.13578417000007903
}
}
}
}
}
}
}