{ "name": "root", "gauges": { "Worm.Policy.Entropy.mean": { "value": 1.428634762763977, "min": 1.418938398361206, "max": 1.4288861751556396, "count": 17 }, "Worm.Policy.Entropy.sum": { "value": 42859.04296875, "min": 42568.15234375, "max": 42866.5859375, "count": 17 }, "Worm.Environment.EpisodeLength.mean": { "value": 999.0, "min": 999.0, "max": 999.0, "count": 17 }, "Worm.Environment.EpisodeLength.sum": { "value": 29970.0, "min": 29970.0, "max": 29970.0, "count": 17 }, "Worm.Step.mean": { "value": 509000.0, "min": 29000.0, "max": 509000.0, "count": 17 }, "Worm.Step.sum": { "value": 509000.0, "min": 29000.0, "max": 509000.0, "count": 17 }, "Worm.Policy.ExtrinsicValueEstimate.mean": { "value": 0.3504001200199127, "min": 0.17438696324825287, "max": 0.3504001200199127, "count": 17 }, "Worm.Policy.ExtrinsicValueEstimate.sum": { "value": 10.512003898620605, "min": 5.231608867645264, "max": 10.512003898620605, "count": 17 }, "Worm.Environment.CumulativeReward.mean": { "value": 3.974692003793704, "min": 0.20528365392237902, "max": 3.974692003793704, "count": 17 }, "Worm.Environment.CumulativeReward.sum": { "value": 119.24076011381112, "min": 5.9532259637489915, "max": 119.24076011381112, "count": 17 }, "Worm.Policy.ExtrinsicReward.mean": { "value": 3.974692003793704, "min": 0.20528365392237902, "max": 3.974692003793704, "count": 17 }, "Worm.Policy.ExtrinsicReward.sum": { "value": 119.24076011381112, "min": 5.9532259637489915, "max": 119.24076011381112, "count": 17 }, "Worm.IsTraining.mean": { "value": 0.0, "min": 0.0, "max": 1.0, "count": 17 }, "Worm.IsTraining.sum": { "value": 0.0, "min": 0.0, "max": 1.0, "count": 17 }, "Worm.Losses.PolicyLoss.mean": { "value": 0.01597467351460918, "min": 0.012571080180350691, "max": 0.02220770773627529, "count": 16 }, "Worm.Losses.PolicyLoss.sum": { "value": 0.01597467351460918, "min": 0.012571080180350691, "max": 0.02220770773627529, "count": 16 }, "Worm.Losses.ValueLoss.mean": { "value": 0.017406212720310406, "min": 0.0014133177243084425, "max": 0.017406212720310406, "count": 16 }, "Worm.Losses.ValueLoss.sum": { "value": 0.017406212720310406, "min": 0.0014133177243084425, "max": 0.017406212720310406, "count": 16 }, "Worm.Policy.LearningRate.mean": { "value": 1.2000096000000011e-05, "min": 1.2000096000000011e-05, "max": 0.00028200000599999995, "count": 16 }, "Worm.Policy.LearningRate.sum": { "value": 1.2000096000000011e-05, "min": 1.2000096000000011e-05, "max": 0.00028200000599999995, "count": 16 }, "Worm.Policy.Epsilon.mean": { "value": 0.10400000000000001, "min": 0.10400000000000001, "max": 0.194, "count": 16 }, "Worm.Policy.Epsilon.sum": { "value": 0.10400000000000001, "min": 0.10400000000000001, "max": 0.194, "count": 16 }, "Worm.Policy.Beta.mean": { "value": 0.00020960000000000022, "min": 0.00020960000000000022, "max": 0.0047006, "count": 16 }, "Worm.Policy.Beta.sum": { "value": 0.00020960000000000022, "min": 0.00020960000000000022, "max": 0.0047006, "count": 16 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1662407258", "python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Worm.yaml --env=./trained-envs-executables/linux/Worm/Worm --run-id=Worm Training --no-graphics", "mlagents_version": "0.29.0.dev0", "mlagents_envs_version": "0.29.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.8.1+cu102", "numpy_version": "1.21.6", "end_time_seconds": "1662407917" }, "total": 658.778968944, "count": 1, "self": 
0.3867948189998742, "children": { "run_training.setup": { "total": 0.04269169100001591, "count": 1, "self": 0.04269169100001591 }, "TrainerController.start_learning": { "total": 658.349482434, "count": 1, "self": 1.0174214150044918, "children": { "TrainerController._reset_env": { "total": 9.288609135999991, "count": 1, "self": 9.288609135999991 }, "TrainerController.advance": { "total": 647.9262574909956, "count": 51000, "self": 1.0869746869987011, "children": { "env_step": { "total": 534.6039458570044, "count": 51000, "self": 468.45977514498975, "children": { "SubprocessEnvManager._take_step": { "total": 65.57130738201016, "count": 51000, "self": 4.223101729018367, "children": { "TorchPolicy.evaluate": { "total": 61.348205652991794, "count": 51000, "self": 15.330910989985284, "children": { "TorchPolicy.sample_actions": { "total": 46.01729466300651, "count": 51000, "self": 46.01729466300651 } } } } }, "workers": { "total": 0.572863330004509, "count": 51000, "self": 0.0, "children": { "worker_root": { "total": 655.9103684310032, "count": 51000, "is_parallel": true, "self": 250.29203113401485, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0010145619999661903, "count": 1, "is_parallel": true, "self": 0.00033098199992309674, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006835800000430936, "count": 2, "is_parallel": true, "self": 0.0006835800000430936 } } }, "UnityEnvironment.step": { "total": 0.029888966000044093, "count": 1, "is_parallel": true, "self": 0.0001705239999978403, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00042877300001009644, "count": 1, "is_parallel": true, "self": 0.00042877300001009644 }, "communicator.exchange": { "total": 0.02893172599999616, "count": 1, "is_parallel": true, "self": 0.02893172599999616 }, "steps_from_proto": { "total": 0.00035794300003999524, "count": 1, "is_parallel": true, "self": 0.00013223600006995184, "children": { "_process_rank_one_or_two_observation": { "total": 0.0002257069999700434, "count": 2, "is_parallel": true, "self": 0.0002257069999700434 } } } } } } }, "UnityEnvironment.step": { "total": 405.6183372969884, "count": 50999, "is_parallel": true, "self": 8.67063979999881, "children": { "UnityEnvironment._generate_step_input": { "total": 15.651258368993695, "count": 50999, "is_parallel": true, "self": 15.651258368993695 }, "communicator.exchange": { "total": 358.49210899499656, "count": 50999, "is_parallel": true, "self": 358.49210899499656 }, "steps_from_proto": { "total": 22.804330132999326, "count": 50999, "is_parallel": true, "self": 8.956747805012014, "children": { "_process_rank_one_or_two_observation": { "total": 13.847582327987311, "count": 101998, "is_parallel": true, "self": 13.847582327987311 } } } } } } } } } } }, "trainer_advance": { "total": 112.2353369469925, "count": 51000, "self": 1.356951336996019, "children": { "process_trajectory": { "total": 34.14090397099642, "count": 51000, "self": 34.01851668299639, "children": { "RLTrainer._checkpoint": { "total": 0.12238728800002718, "count": 1, "self": 0.12238728800002718 } } }, "_update_policy": { "total": 76.73748163900007, "count": 16, "self": 66.82305585299991, "children": { "TorchPPOOptimizer.update": { "total": 9.914425786000152, "count": 672, "self": 9.914425786000152 } } } } } } }, "trainer_threads": { "total": 1.0779999684018549e-06, "count": 1, "self": 1.0779999684018549e-06 }, "TrainerController._save_models": { "total": 
0.1171933140000192, "count": 1, "self": 0.0023542819999420317, "children": { "RLTrainer._checkpoint": { "total": 0.11483903200007717, "count": 1, "self": 0.11483903200007717 } } } } } } }