ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.40358304977417,
"min": 1.40358304977417,
"max": 1.4275671243667603,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70127.21875,
"min": 68335.984375,
"max": 78709.6484375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.72280701754386,
"min": 86.13763066202091,
"max": 403.61290322580646,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49432.0,
"min": 49145.0,
"max": 50191.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999953.0,
"min": 49846.0,
"max": 1999953.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999953.0,
"min": 49846.0,
"max": 1999953.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4335739612579346,
"min": 0.22307144105434418,
"max": 2.4650888442993164,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1387.13720703125,
"min": 27.437788009643555,
"max": 1402.2105712890625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.757414835482313,
"min": 1.961903014561025,
"max": 4.01754723632403,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2141.7264562249184,
"min": 241.3140707910061,
"max": 2170.72803992033,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.757414835482313,
"min": 1.961903014561025,
"max": 4.01754723632403,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2141.7264562249184,
"min": 241.3140707910061,
"max": 2170.72803992033,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01665457813974677,
"min": 0.012483907513402906,
"max": 0.019459624055889434,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.049963734419240305,
"min": 0.026929914254287722,
"max": 0.05522323688104128,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.057640505623486306,
"min": 0.020540864175806445,
"max": 0.05897592157125473,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17292151687045892,
"min": 0.04108172835161289,
"max": 0.1769277647137642,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.365598878166666e-06,
"min": 3.365598878166666e-06,
"max": 0.00029536200154599995,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0096796634499998e-05,
"min": 1.0096796634499998e-05,
"max": 0.0008442195185935,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10112183333333336,
"min": 0.10112183333333336,
"max": 0.19845399999999996,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30336550000000007,
"min": 0.20738224999999996,
"max": 0.5814065,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.597948333333333e-05,
"min": 6.597948333333333e-05,
"max": 0.0049228546,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019793845,
"min": 0.00019793845,
"max": 0.014072184350000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672941032",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672943424"
},
"total": 2392.057608997,
"count": 1,
"self": 0.3839657050002643,
"children": {
"run_training.setup": {
"total": 0.10879844000010053,
"count": 1,
"self": 0.10879844000010053
},
"TrainerController.start_learning": {
"total": 2391.5648448519996,
"count": 1,
"self": 4.132852988955619,
"children": {
"TrainerController._reset_env": {
"total": 7.121132347999946,
"count": 1,
"self": 7.121132347999946
},
"TrainerController.advance": {
"total": 2380.1910289580437,
"count": 232047,
"self": 4.508717935274035,
"children": {
"env_step": {
"total": 1887.8281167409466,
"count": 232047,
"self": 1587.6063242030223,
"children": {
"SubprocessEnvManager._take_step": {
"total": 297.3802512819584,
"count": 232047,
"self": 15.668862674966704,
"children": {
"TorchPolicy.evaluate": {
"total": 281.7113886069917,
"count": 222999,
"self": 69.04030021601397,
"children": {
"TorchPolicy.sample_actions": {
"total": 212.67108839097773,
"count": 222999,
"self": 212.67108839097773
}
}
}
}
},
"workers": {
"total": 2.8415412559659217,
"count": 232047,
"self": 0.0,
"children": {
"worker_root": {
"total": 2382.97799883905,
"count": 232047,
"is_parallel": true,
"self": 1077.2487858889788,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017335040001853486,
"count": 1,
"is_parallel": true,
"self": 0.0003330210001877276,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001400482999997621,
"count": 2,
"is_parallel": true,
"self": 0.001400482999997621
}
}
},
"UnityEnvironment.step": {
"total": 0.03243245599992406,
"count": 1,
"is_parallel": true,
"self": 0.0003398439998818503,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022082400005274394,
"count": 1,
"is_parallel": true,
"self": 0.00022082400005274394
},
"communicator.exchange": {
"total": 0.03100059100006547,
"count": 1,
"is_parallel": true,
"self": 0.03100059100006547
},
"steps_from_proto": {
"total": 0.0008711969999239955,
"count": 1,
"is_parallel": true,
"self": 0.000303121000115425,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005680759998085705,
"count": 2,
"is_parallel": true,
"self": 0.0005680759998085705
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1305.7292129500713,
"count": 232046,
"is_parallel": true,
"self": 36.859462708071305,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 85.28554225793118,
"count": 232046,
"is_parallel": true,
"self": 85.28554225793118
},
"communicator.exchange": {
"total": 1082.8776438109326,
"count": 232046,
"is_parallel": true,
"self": 1082.8776438109326
},
"steps_from_proto": {
"total": 100.70656417313626,
"count": 232046,
"is_parallel": true,
"self": 44.04921635012943,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.65734782300683,
"count": 464092,
"is_parallel": true,
"self": 56.65734782300683
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 487.8541942818233,
"count": 232047,
"self": 6.335036595768997,
"children": {
"process_trajectory": {
"total": 157.35000511205385,
"count": 232047,
"self": 156.140599760055,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2094053519988393,
"count": 10,
"self": 1.2094053519988393
}
}
},
"_update_policy": {
"total": 324.16915257400046,
"count": 97,
"self": 270.6241105770057,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.54504199699477,
"count": 2910,
"self": 53.54504199699477
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.027000052999938e-06,
"count": 1,
"self": 1.027000052999938e-06
},
"TrainerController._save_models": {
"total": 0.11982953000006091,
"count": 1,
"self": 0.001989337999930285,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11784019200013063,
"count": 1,
"self": 0.11784019200013063
}
}
}
}
}
}
}
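
The gauges above can be summarized offline once the file is available locally. Below is a minimal sketch, assuming the log has been downloaded to a hypothetical path run_logs/timers.json; it simply loads the JSON and prints each gauge's latest value together with its min/max range and sample count.

import json

# Path is an assumption for illustration; adjust to wherever timers.json was saved.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the latest value plus min/max/count over the training run.
for name, gauge in timers["gauges"].items():
    print(f'{name}: value={gauge["value"]:.4f} '
          f'(min={gauge["min"]:.4f}, max={gauge["max"]:.4f}, count={gauge["count"]})')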