First Push from colab
c4dce85 verified
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6373741626739502,
"min": 0.6334901452064514,
"max": 1.4763880968093872,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19274.1953125,
"min": 18741.171875,
"max": 44787.7109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989998.0,
"min": 29952.0,
"max": 989998.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989998.0,
"min": 29952.0,
"max": 989998.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.22272032499313354,
"min": -0.10300801694393158,
"max": 0.2238330990076065,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 57.46184539794922,
"min": -24.824932098388672,
"max": 57.46184539794922,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.030431821942329407,
"min": -0.030431821942329407,
"max": 0.367421418428421,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -7.851409912109375,
"min": -7.851409912109375,
"max": 87.07887268066406,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06790833482089166,
"min": 0.06461895713031537,
"max": 0.07365442090082362,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9507166874924832,
"min": 0.5155809463057653,
"max": 1.0450796839249246,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011459252276836196,
"min": 0.00019462852553086006,
"max": 0.011459252276836196,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16042953187570674,
"min": 0.0021409137808394607,
"max": 0.16042953187570674,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.3423832668571485e-06,
"min": 7.3423832668571485e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010279336573600008,
"min": 0.00010279336573600008,
"max": 0.0031186870604376997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244742857142856,
"min": 0.10244742857142856,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.434264,
"min": 1.3886848,
"max": 2.3589964000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025449811428571444,
"min": 0.00025449811428571444,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003562973600000002,
"min": 0.003562973600000002,
"max": 0.10397227376999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.015257926657795906,
"min": 0.01466931402683258,
"max": 0.3854671120643616,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.21361097693443298,
"min": 0.20537039637565613,
"max": 2.698269844055176,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 656.0434782608696,
"min": 639.8461538461538,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30178.0,
"min": 15984.0,
"max": 33272.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.9525564793983231,
"min": -1.0000000521540642,
"max": 0.9525564793983231,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 43.817598052322865,
"min": -31.999601677060127,
"max": 46.72079824656248,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.9525564793983231,
"min": -1.0000000521540642,
"max": 0.9525564793983231,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 43.817598052322865,
"min": -31.999601677060127,
"max": 46.72079824656248,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.10305265652942067,
"min": 0.10199728273505042,
"max": 7.027245636098087,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.7404222003533505,
"min": 4.385883157607168,
"max": 112.43593017756939,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1735571123",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1735573831"
},
"total": 2708.087514588,
"count": 1,
"self": 0.7459083860003375,
"children": {
"run_training.setup": {
"total": 0.09459906399990814,
"count": 1,
"self": 0.09459906399990814
},
"TrainerController.start_learning": {
"total": 2707.247007138,
"count": 1,
"self": 2.044462784022471,
"children": {
"TrainerController._reset_env": {
"total": 3.5026701219999268,
"count": 1,
"self": 3.5026701219999268
},
"TrainerController.advance": {
"total": 2701.619903483978,
"count": 63226,
"self": 2.0823131560450747,
"children": {
"env_step": {
"total": 1734.705533780952,
"count": 63226,
"self": 1594.4621142209135,
"children": {
"SubprocessEnvManager._take_step": {
"total": 139.01794505901626,
"count": 63226,
"self": 5.741377828999475,
"children": {
"TorchPolicy.evaluate": {
"total": 133.27656723001678,
"count": 62558,
"self": 133.27656723001678
}
}
},
"workers": {
"total": 1.2254745010221768,
"count": 63226,
"self": 0.0,
"children": {
"worker_root": {
"total": 2701.057886568959,
"count": 63226,
"is_parallel": true,
"self": 1257.3386714489097,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005429410000033386,
"count": 1,
"is_parallel": true,
"self": 0.0009378129999504381,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0044915970000829475,
"count": 8,
"is_parallel": true,
"self": 0.0044915970000829475
}
}
},
"UnityEnvironment.step": {
"total": 0.10294719199998781,
"count": 1,
"is_parallel": true,
"self": 0.0007856630002152087,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005131349998919177,
"count": 1,
"is_parallel": true,
"self": 0.0005131349998919177
},
"communicator.exchange": {
"total": 0.09750259599991296,
"count": 1,
"is_parallel": true,
"self": 0.09750259599991296
},
"steps_from_proto": {
"total": 0.004145797999967726,
"count": 1,
"is_parallel": true,
"self": 0.000423678000174732,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0037221199997929943,
"count": 8,
"is_parallel": true,
"self": 0.0037221199997929943
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1443.7192151200493,
"count": 63225,
"is_parallel": true,
"self": 43.96181901400928,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.932565640043663,
"count": 63225,
"is_parallel": true,
"self": 27.932565640043663
},
"communicator.exchange": {
"total": 1250.817062214013,
"count": 63225,
"is_parallel": true,
"self": 1250.817062214013
},
"steps_from_proto": {
"total": 121.0077682519834,
"count": 63225,
"is_parallel": true,
"self": 25.852461573077107,
"children": {
"_process_rank_one_or_two_observation": {
"total": 95.1553066789063,
"count": 505800,
"is_parallel": true,
"self": 95.1553066789063
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 964.8320565469807,
"count": 63226,
"self": 3.608022636974283,
"children": {
"process_trajectory": {
"total": 149.67713122801115,
"count": 63226,
"self": 149.42917199701105,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24795923100009531,
"count": 2,
"self": 0.24795923100009531
}
}
},
"_update_policy": {
"total": 811.5469026819952,
"count": 438,
"self": 347.83861224100326,
"children": {
"TorchPPOOptimizer.update": {
"total": 463.70829044099196,
"count": 22857,
"self": 463.70829044099196
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.216999862663215e-06,
"count": 1,
"self": 1.216999862663215e-06
},
"TrainerController._save_models": {
"total": 0.07996953099973325,
"count": 1,
"self": 0.002366009000070335,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07760352199966292,
"count": 1,
"self": 0.07760352199966292
}
}
}
}
}
}
}
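For reference, a minimal Python sketch for inspecting this dump, assuming the file above is saved locally as run_logs/timers.json (that path is an assumption, not part of the log itself):

import json

# Load the ML-Agents timer/gauge dump produced during training.
# NOTE: "run_logs/timers.json" is an assumed local path.
with open("run_logs/timers.json") as f:
    stats = json.load(f)

# Each gauge records the latest value plus the min/max/count observed across the run.
for name, gauge in stats["gauges"].items():
    print(f"{name}: value={gauge['value']:.4g}  min={gauge['min']:.4g}  max={gauge['max']:.4g}  count={gauge['count']}")

# The root timer's "total" field is the wall-clock duration of the run, in seconds.
print(f"total training time: {stats['total']:.1f} s")

This only reads the top-level "gauges" and "total" fields shown above; the nested "children" entries follow the same {total, count, self, children} shape and can be walked recursively the same way.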