{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6858731508255005,
"min": 0.6802798509597778,
"max": 1.3942458629608154,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 20674.9609375,
"min": 20546.462890625,
"max": 42295.84375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989941.0,
"min": 29952.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989941.0,
"min": 29952.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.024597879499197006,
"min": -0.13248559832572937,
"max": 0.024597879499197006,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 6.001882553100586,
"min": -31.39908790588379,
"max": 6.001882553100586,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.022965509444475174,
"min": 0.015016665682196617,
"max": 0.30718544125556946,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.603584289550781,
"min": 3.619016408920288,
"max": 74.03169250488281,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06884264806746107,
"min": 0.06543262210868282,
"max": 0.07405936573156563,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.963797072944455,
"min": 0.48560728723331803,
"max": 1.046625895714433,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.004749224507946711,
"min": 6.643410630810209e-05,
"max": 0.009164674884044056,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.06648914311125396,
"min": 0.0009300774883134293,
"max": 0.10142323369200312,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.344354694771429e-06,
"min": 7.344354694771429e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010282096572680001,
"min": 0.00010282096572680001,
"max": 0.003140721553092899,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244808571428572,
"min": 0.10244808571428572,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4342732,
"min": 1.3265935999999998,
"max": 2.3469071,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025456376285714293,
"min": 0.00025456376285714293,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035638926800000007,
"min": 0.0035638926800000007,
"max": 0.10471601928999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01644657738506794,
"min": 0.01644657738506794,
"max": 0.4698963463306427,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.23025208711624146,
"min": 0.18557105958461761,
"max": 3.2892744541168213,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 877.2424242424242,
"min": 807.578947368421,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28949.0,
"min": 15984.0,
"max": 33067.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.27149700689496414,
"min": -1.2281125732697546,
"max": 0.13946838108332535,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -8.959401227533817,
"min": -39.29960234463215,
"max": 5.299798481166363,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.27149700689496414,
"min": -1.2281125732697546,
"max": 0.13946838108332535,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -8.959401227533817,
"min": -39.29960234463215,
"max": 5.299798481166363,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.14775762952525506,
"min": 0.14236713737390297,
"max": 9.939638264477253,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.876001774333417,
"min": 3.6765784872695804,
"max": 159.03421223163605,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1658945802",
"python_version": "3.8.13 (default, Mar 28 2022, 06:59:08) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\sgstq\\miniconda3\\envs\\hugfrl\\Scripts\\mlagents-learn config/ppo/PyramidsRND.yaml --env=.\\trained-envs-executables\\windows\\Pyramids.exe --run-id=First Training --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.23.1",
"end_time_seconds": "1658949175"
},
"total": 3372.5849983,
"count": 1,
"self": 0.5663061999998718,
"children": {
"run_training.setup": {
"total": 0.2578105000000006,
"count": 1,
"self": 0.2578105000000006
},
"TrainerController.start_learning": {
"total": 3371.7608816,
"count": 1,
"self": 2.47262139997747,
"children": {
"TrainerController._reset_env": {
"total": 13.3283053,
"count": 1,
"self": 13.3283053
},
"TrainerController.advance": {
"total": 3355.8058921000224,
"count": 63024,
"self": 2.5802328000731904,
"children": {
"env_step": {
"total": 1706.6492662999535,
"count": 63024,
"self": 1377.5888684999454,
"children": {
"SubprocessEnvManager._take_step": {
"total": 327.46906000000337,
"count": 63025,
"self": 8.441055599959043,
"children": {
"TorchPolicy.evaluate": {
"total": 319.0280044000443,
"count": 62611,
"self": 104.92986340007155,
"children": {
"TorchPolicy.sample_actions": {
"total": 214.09814099997277,
"count": 62611,
"self": 214.09814099997277
}
}
}
}
},
"workers": {
"total": 1.591337800004851,
"count": 63024,
"self": 0.0,
"children": {
"worker_root": {
"total": 3346.181029500018,
"count": 63024,
"is_parallel": true,
"self": 2157.7525975000017,
"children": {
"steps_from_proto": {
"total": 0.003304900000000721,
"count": 2,
"is_parallel": true,
"self": 0.0008608999999992761,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002444000000001445,
"count": 16,
"is_parallel": true,
"self": 0.002444000000001445
}
}
},
"UnityEnvironment.step": {
"total": 1188.4251271000164,
"count": 63024,
"is_parallel": true,
"self": 34.4306848000208,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.589346599993707,
"count": 63024,
"is_parallel": true,
"self": 27.589346599993707
},
"communicator.exchange": {
"total": 1023.3885093000114,
"count": 63024,
"is_parallel": true,
"self": 1023.3885093000114
},
"steps_from_proto": {
"total": 103.0165863999904,
"count": 63024,
"is_parallel": true,
"self": 28.484447699989104,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.5321387000013,
"count": 504192,
"is_parallel": true,
"self": 74.5321387000013
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1646.5763929999957,
"count": 63024,
"self": 3.765892799990752,
"children": {
"process_trajectory": {
"total": 293.12345880000424,
"count": 63024,
"self": 292.754084600004,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3693742000002658,
"count": 2,
"self": 0.3693742000002658
}
}
},
"_update_policy": {
"total": 1349.6870414000007,
"count": 417,
"self": 326.9591998999887,
"children": {
"TorchPPOOptimizer.update": {
"total": 1022.727841500012,
"count": 22980,
"self": 1022.727841500012
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.500000053056283e-06,
"count": 1,
"self": 1.500000053056283e-06
},
"TrainerController._save_models": {
"total": 0.15406130000019402,
"count": 1,
"self": 0.010607400000026246,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14345390000016778,
"count": 1,
"self": 0.14345390000016778
}
}
}
}
}
}
}