{
"name": "root",
"gauges": {
"ShooterBasic.Policy.Entropy.mean": {
"value": 1.0800338983535767,
"min": 0.8503435254096985,
"max": 2.6381008625030518,
"count": 250
},
"ShooterBasic.Policy.Entropy.sum": {
"value": 2158.98779296875,
"min": 1699.836669921875,
"max": 5278.83984375,
"count": 250
},
"ShooterBasic.Step.mean": {
"value": 499997.0,
"min": 1998.0,
"max": 499997.0,
"count": 250
},
"ShooterBasic.Step.sum": {
"value": 499997.0,
"min": 1998.0,
"max": 499997.0,
"count": 250
},
"ShooterBasic.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.004754199180752039,
"min": -0.19113971292972565,
"max": 0.13820959627628326,
"count": 250
},
"ShooterBasic.Policy.ExtrinsicValueEstimate.sum": {
"value": -3.171050786972046,
"min": -130.73956298828125,
"max": 92.04759216308594,
"count": 250
},
"ShooterBasic.Losses.PolicyLoss.mean": {
"value": 0.12840254371015664,
"min": 0.1089587819574046,
"max": 0.15476292033591085,
"count": 250
},
"ShooterBasic.Losses.PolicyLoss.sum": {
"value": 1.027220349681253,
"min": 0.7627114737018322,
"max": 1.175379527247666,
"count": 250
},
"ShooterBasic.Losses.ValueLoss.mean": {
"value": 1.8283528637613733e-07,
"min": 8.446994632319711e-08,
"max": 0.028192297885728596,
"count": 250
},
"ShooterBasic.Losses.ValueLoss.sum": {
"value": 1.4626822910090986e-06,
"min": 6.742508425842489e-07,
"max": 0.22553838308582877,
"count": 250
},
"ShooterBasic.Policy.LearningRate.mean": {
"value": 5.815748061750049e-07,
"min": 5.815748061750049e-07,
"max": 0.00029938080020639997,
"count": 250
},
"ShooterBasic.Policy.LearningRate.sum": {
"value": 4.652598449400039e-06,
"min": 4.652598449400039e-06,
"max": 0.0023857548047483998,
"count": 250
},
"ShooterBasic.Policy.Epsilon.mean": {
"value": 0.10019382500000001,
"min": 0.10019382500000001,
"max": 0.19979360000000002,
"count": 250
},
"ShooterBasic.Policy.Epsilon.sum": {
"value": 0.8015506000000001,
"min": 0.706958,
"max": 1.5952516,
"count": 250
},
"ShooterBasic.Policy.Beta.mean": {
"value": 1.9671867500000083e-05,
"min": 1.9671867500000083e-05,
"max": 0.00498970064,
"count": 250
},
"ShooterBasic.Policy.Beta.sum": {
"value": 0.00015737494000000066,
"min": 0.00015737494000000066,
"max": 0.039763054840000006,
"count": 250
},
"ShooterBasic.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 250
},
"ShooterBasic.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 250
},
"ShooterBasic.Environment.EpisodeLength.mean": {
"value": 2499.0,
"min": 32.450980392156865,
"max": 2499.0,
"count": 200
},
"ShooterBasic.Environment.EpisodeLength.sum": {
"value": 2499.0,
"min": 1367.0,
"max": 3538.0,
"count": 200
},
"ShooterBasic.Environment.CumulativeReward.mean": {
"value": -0.9995999505044892,
"min": -5.5996002707397565,
"max": 0.013200071291066706,
"count": 200
},
"ShooterBasic.Environment.CumulativeReward.sum": {
"value": -0.9995999505044892,
"min": -51.72619968513027,
"max": 0.013200071291066706,
"count": 200
},
"ShooterBasic.Policy.ExtrinsicReward.mean": {
"value": -0.9995999505044892,
"min": -5.5996002707397565,
"max": 0.013200071291066706,
"count": 200
},
"ShooterBasic.Policy.ExtrinsicReward.sum": {
"value": -0.9995999505044892,
"min": -51.72619968513027,
"max": 0.013200071291066706,
"count": 200
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692211388",
"python_version": "3.9.15 | packaged by conda-forge | (main, Nov 22 2022, 08:45:29) \n[GCC 10.4.0]",
"command_line_arguments": "/opt/saturncloud/envs/saturn/bin/mlagents-learn ./config/ppo/ShooterOrange.yaml --env=./training-envs-executables/linux/Shooter_00/Shooter_00 --run-id=Sat_Shooter00 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692213707"
},
"total": 2318.237145102,
"count": 1,
"self": 0.5394069160001891,
"children": {
"run_training.setup": {
"total": 0.06534231099999488,
"count": 1,
"self": 0.06534231099999488
},
"TrainerController.start_learning": {
"total": 2317.632395875,
"count": 1,
"self": 8.648253458966792,
"children": {
"TrainerController._reset_env": {
"total": 0.7743355720001546,
"count": 1,
"self": 0.7743355720001546
},
"TrainerController.advance": {
"total": 2308.137043762033,
"count": 500257,
"self": 7.96894494797516,
"children": {
"env_step": {
"total": 1909.1429837810936,
"count": 500257,
"self": 1475.344438942978,
"children": {
"SubprocessEnvManager._take_step": {
"total": 428.87653917204784,
"count": 500257,
"self": 25.166202942008567,
"children": {
"TorchPolicy.evaluate": {
"total": 403.7103362300393,
"count": 500003,
"self": 403.7103362300393
}
}
},
"workers": {
"total": 4.922005666067662,
"count": 500257,
"self": 0.0,
"children": {
"worker_root": {
"total": 2294.198203800801,
"count": 500257,
"is_parallel": true,
"self": 1306.1283295438036,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0027121129999159166,
"count": 1,
"is_parallel": true,
"self": 0.0013559829997120687,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013561300002038479,
"count": 4,
"is_parallel": true,
"self": 0.0013561300002038479
}
}
},
"UnityEnvironment.step": {
"total": 0.02613363999989815,
"count": 1,
"is_parallel": true,
"self": 0.00012511399995673855,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001400779999585211,
"count": 1,
"is_parallel": true,
"self": 0.0001400779999585211
},
"communicator.exchange": {
"total": 0.02534710999998424,
"count": 1,
"is_parallel": true,
"self": 0.02534710999998424
},
"steps_from_proto": {
"total": 0.00052133799999865,
"count": 1,
"is_parallel": true,
"self": 0.0002898800000821211,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00023145799991652893,
"count": 4,
"is_parallel": true,
"self": 0.00023145799991652893
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 988.0698742569973,
"count": 500256,
"is_parallel": true,
"self": 45.49155122110369,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 38.3971579257618,
"count": 500256,
"is_parallel": true,
"self": 38.3971579257618
},
"communicator.exchange": {
"total": 704.5933176891695,
"count": 500256,
"is_parallel": true,
"self": 704.5933176891695
},
"steps_from_proto": {
"total": 199.58784742096236,
"count": 500256,
"is_parallel": true,
"self": 114.80819607772673,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.77965134323563,
"count": 2001024,
"is_parallel": true,
"self": 84.77965134323563
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 391.0251150329643,
"count": 500257,
"self": 8.844813791147544,
"children": {
"process_trajectory": {
"total": 168.9216901758084,
"count": 500257,
"self": 168.8288259448084,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09286423100002139,
"count": 1,
"self": 0.09286423100002139
}
}
},
"_update_policy": {
"total": 213.25861106600837,
"count": 1937,
"self": 82.45213413402757,
"children": {
"TorchPPOOptimizer.update": {
"total": 130.8064769319808,
"count": 46488,
"self": 130.8064769319808
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1739998626580928e-06,
"count": 1,
"self": 1.1739998626580928e-06
},
"TrainerController._save_models": {
"total": 0.07276190800030236,
"count": 1,
"self": 0.0003258840006310493,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0724360239996713,
"count": 1,
"self": 0.0724360239996713
}
}
}
}
}
}
}