{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.938513994216919,
"min": 0.938513994216919,
"max": 2.870497226715088,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8991.90234375,
"min": 8991.90234375,
"max": 29459.9140625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.801793098449707,
"min": 0.4187970459461212,
"max": 12.801793098449707,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2496.349609375,
"min": 81.24662780761719,
"max": 2608.298828125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07497299133712088,
"min": 0.06361101751458859,
"max": 0.07763341230456718,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2998919653484835,
"min": 0.25842798589021154,
"max": 0.38816706152283587,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20782803459202542,
"min": 0.11604219409477368,
"max": 0.30079276655234544,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8313121383681017,
"min": 0.4641687763790947,
"max": 1.3183099706383312,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.045454545454547,
"min": 3.659090909090909,
"max": 25.527272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1102.0,
"min": 161.0,
"max": 1404.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.045454545454547,
"min": 3.659090909090909,
"max": 25.527272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1102.0,
"min": 161.0,
"max": 1404.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690977986",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690978469"
},
"total": 483.881159517,
"count": 1,
"self": 0.8459211119998145,
"children": {
"run_training.setup": {
"total": 0.04788909000035346,
"count": 1,
"self": 0.04788909000035346
},
"TrainerController.start_learning": {
"total": 482.98734931499985,
"count": 1,
"self": 0.5372525829643564,
"children": {
"TrainerController._reset_env": {
"total": 6.670224444000269,
"count": 1,
"self": 6.670224444000269
},
"TrainerController.advance": {
"total": 475.5404042960354,
"count": 18218,
"self": 0.27047679601764685,
"children": {
"env_step": {
"total": 475.26992750001773,
"count": 18218,
"self": 344.95124531098554,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.03895111299153,
"count": 18218,
"self": 1.7384766859495357,
"children": {
"TorchPolicy.evaluate": {
"total": 128.300474427042,
"count": 18218,
"self": 128.300474427042
}
}
},
"workers": {
"total": 0.2797310760406617,
"count": 18218,
"self": 0.0,
"children": {
"worker_root": {
"total": 481.3496723510134,
"count": 18218,
"is_parallel": true,
"self": 229.05057897704683,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006419285999982094,
"count": 1,
"is_parallel": true,
"self": 0.004943380001350306,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001475905998631788,
"count": 10,
"is_parallel": true,
"self": 0.001475905998631788
}
}
},
"UnityEnvironment.step": {
"total": 0.09346670000013546,
"count": 1,
"is_parallel": true,
"self": 0.0016930889996729093,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004002519999630749,
"count": 1,
"is_parallel": true,
"self": 0.0004002519999630749
},
"communicator.exchange": {
"total": 0.08301451300030749,
"count": 1,
"is_parallel": true,
"self": 0.08301451300030749
},
"steps_from_proto": {
"total": 0.008358846000191988,
"count": 1,
"is_parallel": true,
"self": 0.00044326499937596964,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.007915581000816019,
"count": 10,
"is_parallel": true,
"self": 0.007915581000816019
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 252.29909337396657,
"count": 18217,
"is_parallel": true,
"self": 10.841870019804446,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.330877130062618,
"count": 18217,
"is_parallel": true,
"self": 5.330877130062618
},
"communicator.exchange": {
"total": 199.06047987904458,
"count": 18217,
"is_parallel": true,
"self": 199.06047987904458
},
"steps_from_proto": {
"total": 37.06586634505493,
"count": 18217,
"is_parallel": true,
"self": 6.667265038024198,
"children": {
"_process_rank_one_or_two_observation": {
"total": 30.39860130703073,
"count": 182170,
"is_parallel": true,
"self": 30.39860130703073
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013684399982594186,
"count": 1,
"self": 0.00013684399982594186,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 471.6966622284872,
"count": 445164,
"is_parallel": true,
"self": 10.427117151574748,
"children": {
"process_trajectory": {
"total": 259.33386808691284,
"count": 445164,
"is_parallel": true,
"self": 258.143419356913,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1904487299998436,
"count": 4,
"is_parallel": true,
"self": 1.1904487299998436
}
}
},
"_update_policy": {
"total": 201.9356769899996,
"count": 90,
"is_parallel": true,
"self": 77.36228323694286,
"children": {
"TorchPPOOptimizer.update": {
"total": 124.57339375305673,
"count": 4587,
"is_parallel": true,
"self": 124.57339375305673
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.2393311480000193,
"count": 1,
"self": 0.004949748999933945,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23438139900008537,
"count": 1,
"self": 0.23438139900008537
}
}
}
}
}
}
}