{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.41143798828125,
"min": 1.41143798828125,
"max": 1.4279805421829224,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69689.75,
"min": 69689.75,
"max": 76422.3125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 80.66830065359477,
"min": 72.27596439169139,
"max": 393.21259842519686,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49369.0,
"min": 48714.0,
"max": 50105.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999984.0,
"min": 49315.0,
"max": 1999984.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999984.0,
"min": 49315.0,
"max": 1999984.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4064347743988037,
"min": 0.07408761978149414,
"max": 2.5412755012512207,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1472.738037109375,
"min": 9.335040092468262,
"max": 1684.5467529296875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.723787181030691,
"min": 1.7094726738712145,
"max": 4.026517086641915,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2278.957754790783,
"min": 215.39355690777302,
"max": 2678.4918880462646,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.723787181030691,
"min": 1.7094726738712145,
"max": 4.026517086641915,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2278.957754790783,
"min": 215.39355690777302,
"max": 2678.4918880462646,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015272000638611241,
"min": 0.01125135964891039,
"max": 0.018872484372695907,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04581600191583372,
"min": 0.02250271929782078,
"max": 0.0543550950329518,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.07671633640097247,
"min": 0.03514072662219406,
"max": 0.08354678278168043,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.23014900920291742,
"min": 0.07028145324438811,
"max": 0.2506403483450413,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.833248722283329e-06,
"min": 3.833248722283329e-06,
"max": 0.00029527545157485,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1499746166849987e-05,
"min": 1.1499746166849987e-05,
"max": 0.0008439292686902499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10127771666666667,
"min": 0.10127771666666667,
"max": 0.19842514999999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30383315,
"min": 0.20768770000000003,
"max": 0.5813097500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.37580616666666e-05,
"min": 7.37580616666666e-05,
"max": 0.004921414985000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002212741849999998,
"min": 0.0002212741849999998,
"max": 0.014067356525,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670443792",
"python_version": "3.8.15 (default, Oct 12 2022, 19:14:39) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670446098"
},
"total": 2306.2599956450003,
"count": 1,
"self": 0.605730381000285,
"children": {
"run_training.setup": {
"total": 0.12952445800010537,
"count": 1,
"self": 0.12952445800010537
},
"TrainerController.start_learning": {
"total": 2305.524740806,
"count": 1,
"self": 4.011852011951305,
"children": {
"TrainerController._reset_env": {
"total": 10.924712892999878,
"count": 1,
"self": 10.924712892999878
},
"TrainerController.advance": {
"total": 2290.458943278049,
"count": 232739,
"self": 4.202835299140588,
"children": {
"env_step": {
"total": 1793.5784517619816,
"count": 232739,
"self": 1500.748172544228,
"children": {
"SubprocessEnvManager._take_step": {
"total": 290.12723739680155,
"count": 232739,
"self": 14.850751969881912,
"children": {
"TorchPolicy.evaluate": {
"total": 275.27648542691963,
"count": 222908,
"self": 68.49190493505375,
"children": {
"TorchPolicy.sample_actions": {
"total": 206.78458049186588,
"count": 222908,
"self": 206.78458049186588
}
}
}
}
},
"workers": {
"total": 2.7030418209519667,
"count": 232739,
"self": 0.0,
"children": {
"worker_root": {
"total": 2297.3206149560733,
"count": 232739,
"is_parallel": true,
"self": 1066.8256711380868,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0032839799998782837,
"count": 1,
"is_parallel": true,
"self": 0.0004196140000658488,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002864365999812435,
"count": 2,
"is_parallel": true,
"self": 0.002864365999812435
}
}
},
"UnityEnvironment.step": {
"total": 0.028319634999888876,
"count": 1,
"is_parallel": true,
"self": 0.0002884099997118028,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021008500016250764,
"count": 1,
"is_parallel": true,
"self": 0.00021008500016250764
},
"communicator.exchange": {
"total": 0.027038203000074645,
"count": 1,
"is_parallel": true,
"self": 0.027038203000074645
},
"steps_from_proto": {
"total": 0.0007829369999399205,
"count": 1,
"is_parallel": true,
"self": 0.0002530669996758661,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005298700002640544,
"count": 2,
"is_parallel": true,
"self": 0.0005298700002640544
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1230.4949438179865,
"count": 232738,
"is_parallel": true,
"self": 35.77690313101016,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.79236143783555,
"count": 232738,
"is_parallel": true,
"self": 76.79236143783555
},
"communicator.exchange": {
"total": 1022.6353213770349,
"count": 232738,
"is_parallel": true,
"self": 1022.6353213770349
},
"steps_from_proto": {
"total": 95.29035787210591,
"count": 232738,
"is_parallel": true,
"self": 39.19151140310464,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.09884646900127,
"count": 465476,
"is_parallel": true,
"self": 56.09884646900127
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 492.6776562169266,
"count": 232739,
"self": 6.342259729965008,
"children": {
"process_trajectory": {
"total": 158.0393562959605,
"count": 232739,
"self": 157.55080413496057,
"children": {
"RLTrainer._checkpoint": {
"total": 0.48855216099991594,
"count": 4,
"self": 0.48855216099991594
}
}
},
"_update_policy": {
"total": 328.2960401910011,
"count": 97,
"self": 273.4613900010022,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.83465018999891,
"count": 2910,
"self": 54.83465018999891
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2959999367012642e-06,
"count": 1,
"self": 1.2959999367012642e-06
},
"TrainerController._save_models": {
"total": 0.12923132700007045,
"count": 1,
"self": 0.002816511999753857,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1264148150003166,
"count": 1,
"self": 0.1264148150003166
}
}
}
}
}
}
}