{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4018298387527466,
"min": 1.4018298387527466,
"max": 1.4267714023590088,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70685.8671875,
"min": 69144.03125,
"max": 76814.703125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 73.30609212481427,
"min": 71.71511627906976,
"max": 361.76978417266184,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49335.0,
"min": 48779.0,
"max": 50286.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999842.0,
"min": 49940.0,
"max": 1999842.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999842.0,
"min": 49940.0,
"max": 1999842.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.54508900642395,
"min": 0.14341001212596893,
"max": 2.5594727993011475,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1712.8448486328125,
"min": 19.79058265686035,
"max": 1725.8233642578125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9572967648683295,
"min": 1.833044314082118,
"max": 4.051893830578338,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2663.260722756386,
"min": 252.9601153433323,
"max": 2703.8506847023964,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9572967648683295,
"min": 1.833044314082118,
"max": 4.051893830578338,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2663.260722756386,
"min": 252.9601153433323,
"max": 2703.8506847023964,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018948917248053476,
"min": 0.013041663890665706,
"max": 0.019718869257242963,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05684675174416043,
"min": 0.026083327781331413,
"max": 0.059115004337703184,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06485683065321711,
"min": 0.021156798396259544,
"max": 0.06485683065321711,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.19457049195965131,
"min": 0.04231359679251909,
"max": 0.19457049195965131,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.8019487327166734e-06,
"min": 3.8019487327166734e-06,
"max": 0.00029536155154614996,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.140584619815002e-05,
"min": 1.140584619815002e-05,
"max": 0.0008439672186775997,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10126728333333333,
"min": 0.10126728333333333,
"max": 0.19845385000000007,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30380185,
"min": 0.20769700000000002,
"max": 0.5813223999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.323743833333343e-05,
"min": 7.323743833333343e-05,
"max": 0.004922847115000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002197123150000003,
"min": 0.0002197123150000003,
"max": 0.014067987759999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678178421",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678180878"
},
"total": 2456.773984026,
"count": 1,
"self": 0.49178970100001607,
"children": {
"run_training.setup": {
"total": 0.12309803099992678,
"count": 1,
"self": 0.12309803099992678
},
"TrainerController.start_learning": {
"total": 2456.159096294,
"count": 1,
"self": 4.391076706930107,
"children": {
"TrainerController._reset_env": {
"total": 10.52458995699999,
"count": 1,
"self": 10.52458995699999
},
"TrainerController.advance": {
"total": 2441.1242485970697,
"count": 233375,
"self": 4.748506028043266,
"children": {
"env_step": {
"total": 1893.4573858460276,
"count": 233375,
"self": 1586.3122889719255,
"children": {
"SubprocessEnvManager._take_step": {
"total": 304.24895152710053,
"count": 233375,
"self": 16.13710509116231,
"children": {
"TorchPolicy.evaluate": {
"total": 288.1118464359382,
"count": 222951,
"self": 73.28033123497073,
"children": {
"TorchPolicy.sample_actions": {
"total": 214.8315152009675,
"count": 222951,
"self": 214.8315152009675
}
}
}
}
},
"workers": {
"total": 2.8961453470016068,
"count": 233375,
"self": 0.0,
"children": {
"worker_root": {
"total": 2447.6404286129673,
"count": 233375,
"is_parallel": true,
"self": 1159.634771367909,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009755080000104499,
"count": 1,
"is_parallel": true,
"self": 0.0003676620000305775,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006078459999798724,
"count": 2,
"is_parallel": true,
"self": 0.0006078459999798724
}
}
},
"UnityEnvironment.step": {
"total": 0.029523709000045528,
"count": 1,
"is_parallel": true,
"self": 0.0002923740000824182,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002277479999293064,
"count": 1,
"is_parallel": true,
"self": 0.0002277479999293064
},
"communicator.exchange": {
"total": 0.028273493000028793,
"count": 1,
"is_parallel": true,
"self": 0.028273493000028793
},
"steps_from_proto": {
"total": 0.0007300940000050105,
"count": 1,
"is_parallel": true,
"self": 0.00026242900003126124,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004676649999737492,
"count": 2,
"is_parallel": true,
"self": 0.0004676649999737492
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1288.0056572450583,
"count": 233374,
"is_parallel": true,
"self": 39.798404870981585,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.67900707901276,
"count": 233374,
"is_parallel": true,
"self": 79.67900707901276
},
"communicator.exchange": {
"total": 1074.0871875469857,
"count": 233374,
"is_parallel": true,
"self": 1074.0871875469857
},
"steps_from_proto": {
"total": 94.44105774807826,
"count": 233374,
"is_parallel": true,
"self": 38.012712885992755,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.42834486208551,
"count": 466748,
"is_parallel": true,
"self": 56.42834486208551
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 542.9183567229991,
"count": 233375,
"self": 6.683653935055872,
"children": {
"process_trajectory": {
"total": 170.82092493794266,
"count": 233375,
"self": 169.5626245519427,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2583003859999735,
"count": 10,
"self": 1.2583003859999735
}
}
},
"_update_policy": {
"total": 365.41377785000054,
"count": 97,
"self": 306.6357590879961,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.77801876200442,
"count": 2910,
"self": 58.77801876200442
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.420000424142927e-07,
"count": 1,
"self": 9.420000424142927e-07
},
"TrainerController._save_models": {
"total": 0.1191800910000893,
"count": 1,
"self": 0.0029933980003988836,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11618669299969042,
"count": 1,
"self": 0.11618669299969042
}
}
}
}
}
}
}