Jbot committed on
Commit
7a65511
·
1 Parent(s): 94ad772

Second time

Pyramids.onnx CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:375e0de80e8c2f1bb70f4fc910275efef155f20a02a9d1c15ad3ec7b61c35161
3
  size 1417437
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46788013940627101a3cb7e6a8a2be5f818a9e38d7609e07699c2e13ddc1a6aa
3
  size 1417437
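
Note: the model artifacts in this commit are stored via Git LFS, so the diff only touches the small pointer files (spec version, sha256 oid, byte size); the binary payloads live in LFS storage. As a rough sanity check, a downloaded artifact can be hashed and compared against the oid in its pointer. A minimal sketch, assuming the file has already been pulled locally:

```python
# Hypothetical helper (not part of this repo): compare a local artifact against
# the sha256 oid recorded in its Git LFS pointer, as shown in the diff above.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return its hex sha256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# The new Pyramids.onnx pointer above records this oid.
expected_oid = "46788013940627101a3cb7e6a8a2be5f818a9e38d7609e07699c2e13ddc1a6aa"
print(sha256_of("Pyramids.onnx") == expected_oid)
```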
Pyramids/{Pyramids-1000007.onnx → Pyramids-1000111.onnx} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:375e0de80e8c2f1bb70f4fc910275efef155f20a02a9d1c15ad3ec7b61c35161
3
  size 1417437
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46788013940627101a3cb7e6a8a2be5f818a9e38d7609e07699c2e13ddc1a6aa
3
  size 1417437
Pyramids/{Pyramids-1000007.pt → Pyramids-1000111.pt} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5f97f4758a144b19a6a4f13d2f5717299ebe8dfd63853b25eba323feb3c29dae
3
- size 8652382
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd4c7c6c77e9b0488bb4d4b815141610b8ed7b9e8af364b0c7c8fa392b47d4c2
3
+ size 8652354
Pyramids/{Pyramids-999879.onnx → Pyramids-499989.onnx} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:375e0de80e8c2f1bb70f4fc910275efef155f20a02a9d1c15ad3ec7b61c35161
3
  size 1417437
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a93542598cf44bdcd32296d686cec1166a86b35a95d48a5ffc94f6b4ba0d7141
3
  size 1417437
Pyramids/{Pyramids-499946.pt → Pyramids-499989.pt} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b79fc92e09f679d33f85401ce0adcb3364479c24f737f5888d3a4783cb724a40
3
- size 8652382
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc04f4013b75fa20e4c51734c26bfd7c166b9fd488b8d548e8b9432ba09c5da4
3
+ size 8652354
Pyramids/Pyramids-999879.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:4cc18b80fe27509f71b56e3ce10ea71b8f79f9beba452ab97f0b077856a7f6e1
3
- size 8652382
 
 
 
 
Pyramids/{Pyramids-499946.onnx → Pyramids-999983.onnx} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8c7ac27cedd6df9e6c39f0dd28f146051dfbf3f58cd85413a59ce589eb13bf15
3
  size 1417437
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46788013940627101a3cb7e6a8a2be5f818a9e38d7609e07699c2e13ddc1a6aa
3
  size 1417437
Pyramids/Pyramids-999983.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:66b135da1902ca1df7295c494bee0439221396cd76b05e9710873fafa3cf3fde
3
+ size 8652354
Pyramids/checkpoint.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:5f97f4758a144b19a6a4f13d2f5717299ebe8dfd63853b25eba323feb3c29dae
3
- size 8652382
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd4c7c6c77e9b0488bb4d4b815141610b8ed7b9e8af364b0c7c8fa392b47d4c2
3
+ size 8652354
Pyramids/{events.out.tfevents.1674418225.f73c5876ef7a.18271.0 → events.out.tfevents.1674421873.f73c5876ef7a.33273.0} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f656160f821f5440e68ab46c59769e76873f52e997bec330808158a3c7d378fb
3
- size 342359
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93e7cd0441da559cae59613c15d6e5108da03e0503aa6add61a8647db7b0824d
3
+ size 332962
config.json CHANGED
@@ -1 +1 @@
1
- {"default_settings": null, "behaviors": {"Pyramids": {"trainer_type": "ppo", "hyperparameters": {"batch_size": 128, "buffer_size": 2048, "learning_rate": 0.0003, "beta": 0.01, "epsilon": 0.2, "lambd": 0.95, "num_epoch": 3, "learning_rate_schedule": "linear", "beta_schedule": "linear", "epsilon_schedule": "linear"}, "network_settings": {"normalize": false, "hidden_units": 512, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "reward_signals": {"extrinsic": {"gamma": 0.99, "strength": 1.0, "network_settings": {"normalize": false, "hidden_units": 128, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}}, "rnd": {"gamma": 0.99, "strength": 0.01, "network_settings": {"normalize": false, "hidden_units": 64, "num_layers": 3, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "learning_rate": 0.0001, "encoding_size": null}}, "init_path": null, "keep_checkpoints": 5, "checkpoint_interval": 500000, "max_steps": 1000000, "time_horizon": 128, "summary_freq": 30000, "threaded": false, "self_play": null, "behavioral_cloning": null}}, "env_settings": {"env_path": "./training-envs-executables/linux/Pyramids/Pyramids", "env_args": null, "base_port": 5005, "num_envs": 1, "num_areas": 1, "seed": -1, "max_lifetime_restarts": 10, "restarts_rate_limit_n": 1, "restarts_rate_limit_period_s": 60}, "engine_settings": {"width": 84, "height": 84, "quality_level": 5, "time_scale": 20, "target_frame_rate": -1, "capture_frame_rate": 60, "no_graphics": true}, "environment_parameters": null, "checkpoint_settings": {"run_id": "Pyramids Training", "initialize_from": null, "load_model": false, "resume": false, "force": false, "train_model": false, "inference": false, "results_dir": "results"}, "torch_settings": {"device": null}, "debug": false}
 
1
+ {"default_settings": null, "behaviors": {"Pyramids": {"trainer_type": "ppo", "hyperparameters": {"batch_size": 128, "buffer_size": 2048, "learning_rate": 0.0003, "beta": 0.01, "epsilon": 0.2, "lambd": 0.95, "num_epoch": 3, "learning_rate_schedule": "linear", "beta_schedule": "linear", "epsilon_schedule": "linear"}, "network_settings": {"normalize": false, "hidden_units": 512, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "reward_signals": {"extrinsic": {"gamma": 0.99, "strength": 1.0, "network_settings": {"normalize": false, "hidden_units": 128, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}}, "rnd": {"gamma": 0.98, "strength": 0.01, "network_settings": {"normalize": false, "hidden_units": 64, "num_layers": 3, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "learning_rate": 0.0001, "encoding_size": null}}, "init_path": null, "keep_checkpoints": 5, "checkpoint_interval": 500000, "max_steps": 1000000, "time_horizon": 128, "summary_freq": 30000, "threaded": false, "self_play": null, "behavioral_cloning": null}}, "env_settings": {"env_path": "./training-envs-executables/linux/Pyramids/Pyramids", "env_args": null, "base_port": 5005, "num_envs": 1, "num_areas": 1, "seed": -1, "max_lifetime_restarts": 10, "restarts_rate_limit_n": 1, "restarts_rate_limit_period_s": 60}, "engine_settings": {"width": 84, "height": 84, "quality_level": 5, "time_scale": 20, "target_frame_rate": -1, "capture_frame_rate": 60, "no_graphics": true}, "environment_parameters": null, "checkpoint_settings": {"run_id": "Pyramids Training2", "initialize_from": null, "load_model": false, "resume": false, "force": false, "train_model": false, "inference": false, "results_dir": "results"}, "torch_settings": {"device": null}, "debug": false}
configuration.yaml CHANGED
@@ -34,7 +34,7 @@ behaviors:
34
  goal_conditioning_type: hyper
35
  deterministic: false
36
  rnd:
37
- gamma: 0.99
38
  strength: 0.01
39
  network_settings:
40
  normalize: false
@@ -75,7 +75,7 @@ engine_settings:
75
  no_graphics: true
76
  environment_parameters: null
77
  checkpoint_settings:
78
- run_id: Pyramids Training
79
  initialize_from: null
80
  load_model: false
81
  resume: false
 
34
  goal_conditioning_type: hyper
35
  deterministic: false
36
  rnd:
37
+ gamma: 0.98
38
  strength: 0.01
39
  network_settings:
40
  normalize: false
 
75
  no_graphics: true
76
  environment_parameters: null
77
  checkpoint_settings:
78
+ run_id: Pyramids Training2
79
  initialize_from: null
80
  load_model: false
81
  resume: false
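
Note: configuration.yaml mirrors the same two changes (rnd gamma 0.99 → 0.98, run_id "Pyramids Training2"). A minimal sketch that checks the YAML copy, assuming PyYAML is installed and the nesting matches config.json:

```python
# Confirm configuration.yaml carries the same values as config.json.
import yaml

with open("configuration.yaml") as f:
    cfg = yaml.safe_load(f)

assert cfg["behaviors"]["Pyramids"]["reward_signals"]["rnd"]["gamma"] == 0.98
assert cfg["checkpoint_settings"]["run_id"] == "Pyramids Training2"
```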
run_logs/Player-0.log CHANGED
@@ -1,12 +1,9 @@
1
  Mono path[0] = '/content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/Managed'
2
  Mono config path = '/content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/MonoBleedingEdge/etc'
3
  Found 1 interfaces on host : 0) 172.28.0.12
4
- Multi-casting "[IP] 172.28.0.12 [Port] 55407 [Flags] 2 [Guid] 2900623614 [EditorId] 764847374 [Version] 1048832 [Id] LinuxPlayer(13,172.28.0.12) [Debug] 0 [PackageName] LinuxPlayer [ProjectName] UnityEnvironment" to [225.0.0.222:54997]...
5
  Preloaded 'lib_burst_generated.so'
6
  Preloaded 'libgrpc_csharp_ext.x64.so'
7
- PlayerPrefs - Creating folder: /root/.config/unity3d/Unity Technologies
8
- PlayerPrefs - Creating folder: /root/.config/unity3d/Unity Technologies/UnityEnvironment
9
- Unable to load player prefs
10
  Initialize engine version: 2021.3.5f1 (40eb3a945986)
11
  [Subsystems] Discovering subsystems at path /content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/UnitySubsystems
12
  Forcing GfxDevice: Null
@@ -36,7 +33,7 @@ ALSA lib pcm.c:2642:(snd_pcm_open_noupdate) Unknown PCM default
36
  FMOD failed to initialize the output device.: "Error initializing output device. " (60)
37
  FMOD initialized on nosound output
38
  Begin MonoManager ReloadAssembly
39
- - Completed reload, in 0.080 seconds
40
  ERROR: Shader Sprites/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
41
  ERROR: Shader Sprites/Mask shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
42
  ERROR: Shader Legacy Shaders/VertexLit shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
@@ -68,7 +65,7 @@ ERROR: Shader Standard shader is not supported on this GPU (none of subshaders/f
68
  WARNING: Shader Unsupported: 'Standard' - All subshaders removed
69
  WARNING: Shader Did you use #pragma only_renderers and omit this platform?
70
  WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
71
- UnloadTime: 0.726329 ms
72
  ERROR: Shader UI/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
73
  requesting resize 84 x 84
74
  Setting up 1 worker threads for Enlighten.
@@ -76,7 +73,7 @@ PlayerConnection::CleanupMemory Statistics:
76
  [ALLOC_TEMP_TLS] TLS Allocator
77
  StackAllocators :
78
  [ALLOC_TEMP_MAIN]
79
- Peak usage frame count: [16.0 KB-32.0 KB]: 1495 frames, [32.0 KB-64.0 KB]: 14550 frames, [64.0 KB-128.0 KB]: 2725 frames, [2.0 MB-4.0 MB]: 1 frames
80
  Initial Block Size 4.0 MB
81
  Current Block Size 4.0 MB
82
  Peak Allocated Bytes 2.0 MB
@@ -131,12 +128,12 @@ PlayerConnection::CleanupMemory Statistics:
131
  Current Block Size 32.0 KB
132
  Peak Allocated Bytes 0 B
133
  Overflow Count 0
134
- [ALLOC_TEMP_Background Job.Worker 7]
135
  Initial Block Size 32.0 KB
136
  Current Block Size 32.0 KB
137
  Peak Allocated Bytes 0 B
138
  Overflow Count 0
139
- [ALLOC_TEMP_Background Job.Worker 15]
140
  Initial Block Size 32.0 KB
141
  Current Block Size 32.0 KB
142
  Peak Allocated Bytes 0 B
@@ -213,22 +210,22 @@ PlayerConnection::CleanupMemory Statistics:
213
  Peak Allocated memory 1.6 MB
214
  Peak Large allocation bytes 0 B
215
  [ALLOC_DEFAULT] Dual Thread Allocator
216
- Peak main deferred allocation count 12989
217
  [ALLOC_BUCKET]
218
  Large Block size 4.0 MB
219
  Used Block count 1
220
- Peak Allocated bytes 1.5 MB
221
  [ALLOC_DEFAULT_MAIN]
222
  Peak usage frame count: [8.0 MB-16.0 MB]: 1 frames, [16.0 MB-32.0 MB]: 18770 frames
223
  Requested Block Size 16.0 MB
224
  Peak Block count 3
225
- Peak Allocated memory 29.8 MB
226
  Peak Large allocation bytes 0 B
227
  [ALLOC_DEFAULT_THREAD]
228
  Peak usage frame count: [16.0 MB-32.0 MB]: 18771 frames
229
  Requested Block Size 16.0 MB
230
  Peak Block count 1
231
- Peak Allocated memory 23.5 MB
232
  Peak Large allocation bytes 16.0 MB
233
  [ALLOC_TEMP_JOB_1_FRAME]
234
  Initial Block Size 2.0 MB
@@ -255,7 +252,7 @@ PlayerConnection::CleanupMemory Statistics:
255
  [ALLOC_BUCKET]
256
  Large Block size 4.0 MB
257
  Used Block count 1
258
- Peak Allocated bytes 1.5 MB
259
  [ALLOC_GFX_MAIN]
260
  Peak usage frame count: [32.0 KB-64.0 KB]: 18770 frames, [64.0 KB-128.0 KB]: 1 frames
261
  Requested Block Size 16.0 MB
@@ -273,7 +270,7 @@ PlayerConnection::CleanupMemory Statistics:
273
  [ALLOC_BUCKET]
274
  Large Block size 4.0 MB
275
  Used Block count 1
276
- Peak Allocated bytes 1.5 MB
277
  [ALLOC_CACHEOBJECTS_MAIN]
278
  Peak usage frame count: [0.5 MB-1.0 MB]: 299 frames, [1.0 MB-2.0 MB]: 18472 frames
279
  Requested Block Size 4.0 MB
@@ -291,7 +288,7 @@ PlayerConnection::CleanupMemory Statistics:
291
  [ALLOC_BUCKET]
292
  Large Block size 4.0 MB
293
  Used Block count 1
294
- Peak Allocated bytes 1.5 MB
295
  [ALLOC_TYPETREE_MAIN]
296
  Peak usage frame count: [0-1.0 KB]: 18771 frames
297
  Requested Block Size 2.0 MB
@@ -314,4 +311,4 @@ PlayerConnection::CleanupMemory Statistics:
314
  Large Block size 4.0 MB
315
  Used Block count 1
316
  Peak Allocated bytes 396 B
317
- ##utp:{"type":"MemoryLeaks","version":2,"phase":"Immediate","time":1674420181755,"processId":18292,"allocatedMemory":1957332,"memoryLabels":[{"Default":9033},{"Permanent":1264},{"Thread":34460},{"Manager":10603},{"VertexData":12},{"Geometry":280},{"Texture":16},{"Shader":69173},{"Material":24},{"GfxDevice":35248},{"Animation":304},{"Audio":3976},{"Physics":288},{"Serialization":216},{"Input":9176},{"JobScheduler":200},{"Mono":40},{"ScriptingNativeRuntime":216},{"BaseObject":1609212},{"Resource":592},{"Renderer":1936},{"Transform":48},{"File":800},{"WebCam":24},{"Culling":40},{"Terrain":953},{"Wind":24},{"String":3447},{"DynamicArray":30868},{"HashMap":7680},{"Utility":1360},{"PoolAlloc":1160},{"TypeTree":1792},{"ScriptManager":80},{"RuntimeInitializeOnLoadManager":72},{"SpriteAtlas":112},{"GI":3272},{"Unet":16},{"Director":7760},{"WebRequest":720},{"VR":45473},{"SceneManager":424},{"Video":32},{"LazyScriptCache":32},{"NativeArray":384},{"Camera":25},{"Secure":1},{"SerializationCache":624},{"APIUpdating":5872},{"Subsystems":384},{"VirtualTexturing":57552},{"AssetReference":32}]}
 
1
  Mono path[0] = '/content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/Managed'
2
  Mono config path = '/content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/MonoBleedingEdge/etc'
3
  Found 1 interfaces on host : 0) 172.28.0.12
4
+ Multi-casting "[IP] 172.28.0.12 [Port] 55116 [Flags] 2 [Guid] 4253888430 [EditorId] 764847374 [Version] 1048832 [Id] LinuxPlayer(13,172.28.0.12) [Debug] 0 [PackageName] LinuxPlayer [ProjectName] UnityEnvironment" to [225.0.0.222:54997]...
5
  Preloaded 'lib_burst_generated.so'
6
  Preloaded 'libgrpc_csharp_ext.x64.so'
 
 
 
7
  Initialize engine version: 2021.3.5f1 (40eb3a945986)
8
  [Subsystems] Discovering subsystems at path /content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/UnitySubsystems
9
  Forcing GfxDevice: Null
 
33
  FMOD failed to initialize the output device.: "Error initializing output device. " (60)
34
  FMOD initialized on nosound output
35
  Begin MonoManager ReloadAssembly
36
+ - Completed reload, in 0.079 seconds
37
  ERROR: Shader Sprites/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
38
  ERROR: Shader Sprites/Mask shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
39
  ERROR: Shader Legacy Shaders/VertexLit shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
 
65
  WARNING: Shader Unsupported: 'Standard' - All subshaders removed
66
  WARNING: Shader Did you use #pragma only_renderers and omit this platform?
67
  WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
68
+ UnloadTime: 0.728298 ms
69
  ERROR: Shader UI/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
70
  requesting resize 84 x 84
71
  Setting up 1 worker threads for Enlighten.
 
73
  [ALLOC_TEMP_TLS] TLS Allocator
74
  StackAllocators :
75
  [ALLOC_TEMP_MAIN]
76
+ Peak usage frame count: [16.0 KB-32.0 KB]: 4770 frames, [32.0 KB-64.0 KB]: 11628 frames, [64.0 KB-128.0 KB]: 2372 frames, [2.0 MB-4.0 MB]: 1 frames
77
  Initial Block Size 4.0 MB
78
  Current Block Size 4.0 MB
79
  Peak Allocated Bytes 2.0 MB
 
128
  Current Block Size 32.0 KB
129
  Peak Allocated Bytes 0 B
130
  Overflow Count 0
131
+ [ALLOC_TEMP_Background Job.Worker 15]
132
  Initial Block Size 32.0 KB
133
  Current Block Size 32.0 KB
134
  Peak Allocated Bytes 0 B
135
  Overflow Count 0
136
+ [ALLOC_TEMP_Background Job.Worker 7]
137
  Initial Block Size 32.0 KB
138
  Current Block Size 32.0 KB
139
  Peak Allocated Bytes 0 B
 
210
  Peak Allocated memory 1.6 MB
211
  Peak Large allocation bytes 0 B
212
  [ALLOC_DEFAULT] Dual Thread Allocator
213
+ Peak main deferred allocation count 11320
214
  [ALLOC_BUCKET]
215
  Large Block size 4.0 MB
216
  Used Block count 1
217
+ Peak Allocated bytes 1.6 MB
218
  [ALLOC_DEFAULT_MAIN]
219
  Peak usage frame count: [8.0 MB-16.0 MB]: 1 frames, [16.0 MB-32.0 MB]: 18770 frames
220
  Requested Block Size 16.0 MB
221
  Peak Block count 3
222
+ Peak Allocated memory 31.2 MB
223
  Peak Large allocation bytes 0 B
224
  [ALLOC_DEFAULT_THREAD]
225
  Peak usage frame count: [16.0 MB-32.0 MB]: 18771 frames
226
  Requested Block Size 16.0 MB
227
  Peak Block count 1
228
+ Peak Allocated memory 22.3 MB
229
  Peak Large allocation bytes 16.0 MB
230
  [ALLOC_TEMP_JOB_1_FRAME]
231
  Initial Block Size 2.0 MB
 
252
  [ALLOC_BUCKET]
253
  Large Block size 4.0 MB
254
  Used Block count 1
255
+ Peak Allocated bytes 1.6 MB
256
  [ALLOC_GFX_MAIN]
257
  Peak usage frame count: [32.0 KB-64.0 KB]: 18770 frames, [64.0 KB-128.0 KB]: 1 frames
258
  Requested Block Size 16.0 MB
 
270
  [ALLOC_BUCKET]
271
  Large Block size 4.0 MB
272
  Used Block count 1
273
+ Peak Allocated bytes 1.6 MB
274
  [ALLOC_CACHEOBJECTS_MAIN]
275
  Peak usage frame count: [0.5 MB-1.0 MB]: 299 frames, [1.0 MB-2.0 MB]: 18472 frames
276
  Requested Block Size 4.0 MB
 
288
  [ALLOC_BUCKET]
289
  Large Block size 4.0 MB
290
  Used Block count 1
291
+ Peak Allocated bytes 1.6 MB
292
  [ALLOC_TYPETREE_MAIN]
293
  Peak usage frame count: [0-1.0 KB]: 18771 frames
294
  Requested Block Size 2.0 MB
 
311
  Large Block size 4.0 MB
312
  Used Block count 1
313
  Peak Allocated bytes 396 B
314
+ ##utp:{"type":"MemoryLeaks","version":2,"phase":"Immediate","time":1674423853299,"processId":33294,"allocatedMemory":1957332,"memoryLabels":[{"Default":9033},{"Permanent":1264},{"Thread":34460},{"Manager":10603},{"VertexData":12},{"Geometry":280},{"Texture":16},{"Shader":69173},{"Material":24},{"GfxDevice":35248},{"Animation":304},{"Audio":3976},{"Physics":288},{"Serialization":216},{"Input":9176},{"JobScheduler":200},{"Mono":40},{"ScriptingNativeRuntime":216},{"BaseObject":1609212},{"Resource":592},{"Renderer":1936},{"Transform":48},{"File":800},{"WebCam":24},{"Culling":40},{"Terrain":953},{"Wind":24},{"String":3447},{"DynamicArray":30868},{"HashMap":7680},{"Utility":1360},{"PoolAlloc":1160},{"TypeTree":1792},{"ScriptManager":80},{"RuntimeInitializeOnLoadManager":72},{"SpriteAtlas":112},{"GI":3272},{"Unet":16},{"Director":7760},{"WebRequest":720},{"VR":45473},{"SceneManager":424},{"Video":32},{"LazyScriptCache":32},{"NativeArray":384},{"Camera":25},{"Secure":1},{"SerializationCache":624},{"APIUpdating":5872},{"Subsystems":384},{"VirtualTexturing":57552},{"AssetReference":32}]}
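
Note: the last line of Player-0.log is a machine-readable memory report — a `##utp:` prefix followed by one JSON object (type, time, processId, allocatedMemory, memoryLabels). A minimal sketch to extract it, assuming the log is read from run_logs/:

```python
# Pull the machine-readable memory report out of the Unity player log.
import json

with open("run_logs/Player-0.log") as f:
    for line in f:
        if line.startswith("##utp:"):
            report = json.loads(line[len("##utp:"):])
            print(report["type"], report["allocatedMemory"])
```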
run_logs/timers.json CHANGED
@@ -2,171 +2,171 @@
2
  "name": "root",
3
  "gauges": {
4
  "Pyramids.Policy.Entropy.mean": {
5
- "value": 0.5169119238853455,
6
- "min": 0.5169119238853455,
7
- "max": 1.3073537349700928,
8
  "count": 33
9
  },
10
  "Pyramids.Policy.Entropy.sum": {
11
- "value": 15441.1923828125,
12
- "min": 15441.1923828125,
13
- "max": 39659.8828125,
14
  "count": 33
15
  },
16
  "Pyramids.Step.mean": {
17
- "value": 989989.0,
18
- "min": 29980.0,
19
- "max": 989989.0,
20
  "count": 33
21
  },
22
  "Pyramids.Step.sum": {
23
- "value": 989989.0,
24
- "min": 29980.0,
25
- "max": 989989.0,
26
  "count": 33
27
  },
28
  "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
29
- "value": 0.3589520752429962,
30
- "min": -0.10609453171491623,
31
- "max": 0.4193597733974457,
32
  "count": 33
33
  },
34
  "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
35
- "value": 96.19915771484375,
36
- "min": -25.462688446044922,
37
- "max": 114.06585693359375,
38
  "count": 33
39
  },
40
  "Pyramids.Policy.RndValueEstimate.mean": {
41
- "value": 0.03227285295724869,
42
- "min": -0.011950272135436535,
43
- "max": 0.4846712648868561,
44
  "count": 33
45
  },
46
  "Pyramids.Policy.RndValueEstimate.sum": {
47
- "value": 8.649124145507812,
48
- "min": -3.250473976135254,
49
- "max": 115.35176086425781,
50
  "count": 33
51
  },
52
  "Pyramids.Losses.PolicyLoss.mean": {
53
- "value": 0.06635625898239336,
54
- "min": 0.06579637175146426,
55
- "max": 0.07496118016013711,
56
  "count": 33
57
  },
58
  "Pyramids.Losses.PolicyLoss.sum": {
59
- "value": 0.928987625753507,
60
- "min": 0.5335781675934241,
61
- "max": 1.0839560226589788,
62
  "count": 33
63
  },
64
  "Pyramids.Losses.ValueLoss.mean": {
65
- "value": 0.013335079653881535,
66
- "min": 0.0010710213334948618,
67
- "max": 0.01573796351989718,
68
  "count": 33
69
  },
70
  "Pyramids.Losses.ValueLoss.sum": {
71
- "value": 0.1866911151543415,
72
- "min": 0.012852256001938341,
73
- "max": 0.22033148927856053,
74
  "count": 33
75
  },
76
  "Pyramids.Policy.LearningRate.mean": {
77
- "value": 7.618511746242856e-06,
78
- "min": 7.618511746242856e-06,
79
- "max": 0.00029484975171675,
80
  "count": 33
81
  },
82
  "Pyramids.Policy.LearningRate.sum": {
83
- "value": 0.00010665916444739999,
84
- "min": 0.00010665916444739999,
85
- "max": 0.0036335566888144994,
86
  "count": 33
87
  },
88
  "Pyramids.Policy.Epsilon.mean": {
89
- "value": 0.10253947142857144,
90
- "min": 0.10253947142857144,
91
- "max": 0.19828325,
92
  "count": 33
93
  },
94
  "Pyramids.Policy.Epsilon.sum": {
95
- "value": 1.4355526,
96
- "min": 1.4355526,
97
- "max": 2.6111855000000004,
98
  "count": 33
99
  },
100
  "Pyramids.Policy.Beta.mean": {
101
- "value": 0.00026369319571428577,
102
- "min": 0.00026369319571428577,
103
- "max": 0.009828496675,
104
  "count": 33
105
  },
106
  "Pyramids.Policy.Beta.sum": {
107
- "value": 0.0036917047400000006,
108
- "min": 0.0036917047400000006,
109
- "max": 0.12113743144999999,
110
  "count": 33
111
  },
112
  "Pyramids.Losses.RNDLoss.mean": {
113
- "value": 0.00866986159235239,
114
- "min": 0.00866986159235239,
115
- "max": 0.5435911417007446,
116
  "count": 33
117
  },
118
  "Pyramids.Losses.RNDLoss.sum": {
119
- "value": 0.12137806415557861,
120
- "min": 0.12137806415557861,
121
- "max": 4.348729133605957,
122
  "count": 33
123
  },
124
  "Pyramids.Environment.EpisodeLength.mean": {
125
- "value": 440.5571428571429,
126
- "min": 422.02777777777777,
127
- "max": 995.71875,
128
  "count": 33
129
  },
130
  "Pyramids.Environment.EpisodeLength.sum": {
131
- "value": 30839.0,
132
- "min": 16650.0,
133
- "max": 33042.0,
134
  "count": 33
135
  },
136
  "Pyramids.Environment.CumulativeReward.mean": {
137
- "value": 1.3307799742690154,
138
- "min": -0.9341500529553741,
139
- "max": 1.3556777554460697,
140
  "count": 33
141
  },
142
  "Pyramids.Environment.CumulativeReward.sum": {
143
- "value": 93.15459819883108,
144
- "min": -29.892801694571972,
145
- "max": 97.60879839211702,
146
  "count": 33
147
  },
148
  "Pyramids.Policy.ExtrinsicReward.mean": {
149
- "value": 1.3307799742690154,
150
- "min": -0.9341500529553741,
151
- "max": 1.3556777554460697,
152
  "count": 33
153
  },
154
  "Pyramids.Policy.ExtrinsicReward.sum": {
155
- "value": 93.15459819883108,
156
- "min": -29.892801694571972,
157
- "max": 97.60879839211702,
158
  "count": 33
159
  },
160
  "Pyramids.Policy.RndReward.mean": {
161
- "value": 0.039998169381786804,
162
- "min": 0.03832940760488176,
163
- "max": 11.33438495794932,
164
  "count": 33
165
  },
166
  "Pyramids.Policy.RndReward.sum": {
167
- "value": 2.799871856725076,
168
- "min": 2.7597173475514865,
169
- "max": 204.01892924308777,
170
  "count": 33
171
  },
172
  "Pyramids.IsTraining.mean": {
@@ -184,74 +184,74 @@
184
  },
185
  "metadata": {
186
  "timer_format_version": "0.1.0",
187
- "start_time_seconds": "1674418222",
188
  "python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
189
- "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
190
  "mlagents_version": "0.29.0.dev0",
191
  "mlagents_envs_version": "0.29.0.dev0",
192
  "communication_protocol_version": "1.5.0",
193
  "pytorch_version": "1.8.1+cu102",
194
  "numpy_version": "1.21.6",
195
- "end_time_seconds": "1674420181"
196
  },
197
- "total": 1958.951682592,
198
  "count": 1,
199
- "self": 0.42629265100003977,
200
  "children": {
201
  "run_training.setup": {
202
- "total": 0.10293960899980448,
203
  "count": 1,
204
- "self": 0.10293960899980448
205
  },
206
  "TrainerController.start_learning": {
207
- "total": 1958.4224503320002,
208
  "count": 1,
209
- "self": 1.1347517761678318,
210
  "children": {
211
  "TrainerController._reset_env": {
212
- "total": 5.989304759999868,
213
  "count": 1,
214
- "self": 5.989304759999868
215
  },
216
  "TrainerController.advance": {
217
- "total": 1951.2169138228328,
218
- "count": 63606,
219
- "self": 1.2237890878641338,
220
  "children": {
221
  "env_step": {
222
- "total": 1312.9251238820461,
223
- "count": 63606,
224
- "self": 1210.4891112990072,
225
  "children": {
226
  "SubprocessEnvManager._take_step": {
227
- "total": 101.71212524004659,
228
- "count": 63606,
229
- "self": 4.385736698061919,
230
  "children": {
231
  "TorchPolicy.evaluate": {
232
- "total": 97.32638854198467,
233
  "count": 62570,
234
- "self": 32.59449646198118,
235
  "children": {
236
  "TorchPolicy.sample_actions": {
237
- "total": 64.73189208000349,
238
  "count": 62570,
239
- "self": 64.73189208000349
240
  }
241
  }
242
  }
243
  }
244
  },
245
  "workers": {
246
- "total": 0.7238873429923842,
247
- "count": 63606,
248
  "self": 0.0,
249
  "children": {
250
  "worker_root": {
251
- "total": 1954.2095819389142,
252
- "count": 63606,
253
  "is_parallel": true,
254
- "self": 840.0561010508136,
255
  "children": {
256
  "run_training.setup": {
257
  "total": 0.0,
@@ -260,48 +260,48 @@
260
  "self": 0.0,
261
  "children": {
262
  "steps_from_proto": {
263
- "total": 0.0021525120000660536,
264
  "count": 1,
265
  "is_parallel": true,
266
- "self": 0.0007887900010246085,
267
  "children": {
268
  "_process_rank_one_or_two_observation": {
269
- "total": 0.001363721999041445,
270
  "count": 8,
271
  "is_parallel": true,
272
- "self": 0.001363721999041445
273
  }
274
  }
275
  },
276
  "UnityEnvironment.step": {
277
- "total": 0.045942916000058176,
278
  "count": 1,
279
  "is_parallel": true,
280
- "self": 0.0005090849999760394,
281
  "children": {
282
  "UnityEnvironment._generate_step_input": {
283
- "total": 0.0004749350000565755,
284
  "count": 1,
285
  "is_parallel": true,
286
- "self": 0.0004749350000565755
287
  },
288
  "communicator.exchange": {
289
- "total": 0.043164431000150216,
290
  "count": 1,
291
  "is_parallel": true,
292
- "self": 0.043164431000150216
293
  },
294
  "steps_from_proto": {
295
- "total": 0.0017944649998753448,
296
  "count": 1,
297
  "is_parallel": true,
298
- "self": 0.0004509650016188971,
299
  "children": {
300
  "_process_rank_one_or_two_observation": {
301
- "total": 0.0013434999982564477,
302
  "count": 8,
303
  "is_parallel": true,
304
- "self": 0.0013434999982564477
305
  }
306
  }
307
  }
@@ -310,34 +310,34 @@
310
  }
311
  },
312
  "UnityEnvironment.step": {
313
- "total": 1114.1534808881006,
314
- "count": 63605,
315
  "is_parallel": true,
316
- "self": 27.019050720966334,
317
  "children": {
318
  "UnityEnvironment._generate_step_input": {
319
- "total": 23.044856184942546,
320
- "count": 63605,
321
  "is_parallel": true,
322
- "self": 23.044856184942546
323
  },
324
  "communicator.exchange": {
325
- "total": 963.2631649760033,
326
- "count": 63605,
327
  "is_parallel": true,
328
- "self": 963.2631649760033
329
  },
330
  "steps_from_proto": {
331
- "total": 100.82640900618844,
332
- "count": 63605,
333
  "is_parallel": true,
334
- "self": 22.094549035230557,
335
  "children": {
336
  "_process_rank_one_or_two_observation": {
337
- "total": 78.73185997095788,
338
- "count": 508840,
339
  "is_parallel": true,
340
- "self": 78.73185997095788
341
  }
342
  }
343
  }
@@ -350,31 +350,31 @@
350
  }
351
  },
352
  "trainer_advance": {
353
- "total": 637.0680008529225,
354
- "count": 63606,
355
- "self": 2.242945856926781,
356
  "children": {
357
  "process_trajectory": {
358
- "total": 144.45091875599883,
359
- "count": 63606,
360
- "self": 144.24995177599885,
361
  "children": {
362
  "RLTrainer._checkpoint": {
363
- "total": 0.2009669799999756,
364
  "count": 2,
365
- "self": 0.2009669799999756
366
  }
367
  }
368
  },
369
  "_update_policy": {
370
- "total": 490.3741362399969,
371
- "count": 455,
372
- "self": 184.62454752995836,
373
  "children": {
374
  "TorchPPOOptimizer.update": {
375
- "total": 305.74958871003855,
376
- "count": 22782,
377
- "self": 305.74958871003855
378
  }
379
  }
380
  }
@@ -383,19 +383,19 @@
383
  }
384
  },
385
  "trainer_threads": {
386
- "total": 9.79000105871819e-07,
387
  "count": 1,
388
- "self": 9.79000105871819e-07
389
  },
390
  "TrainerController._save_models": {
391
- "total": 0.0814789939995535,
392
  "count": 1,
393
- "self": 0.0013771670001005987,
394
  "children": {
395
  "RLTrainer._checkpoint": {
396
- "total": 0.0801018269994529,
397
  "count": 1,
398
- "self": 0.0801018269994529
399
  }
400
  }
401
  }
 
2
  "name": "root",
3
  "gauges": {
4
  "Pyramids.Policy.Entropy.mean": {
5
+ "value": 0.31421124935150146,
6
+ "min": 0.2840237617492676,
7
+ "max": 1.3432326316833496,
8
  "count": 33
9
  },
10
  "Pyramids.Policy.Entropy.sum": {
11
+ "value": 9431.365234375,
12
+ "min": 8525.2568359375,
13
+ "max": 40748.3046875,
14
  "count": 33
15
  },
16
  "Pyramids.Step.mean": {
17
+ "value": 989908.0,
18
+ "min": 29952.0,
19
+ "max": 989908.0,
20
  "count": 33
21
  },
22
  "Pyramids.Step.sum": {
23
+ "value": 989908.0,
24
+ "min": 29952.0,
25
+ "max": 989908.0,
26
  "count": 33
27
  },
28
  "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
29
+ "value": 0.28630736470222473,
30
+ "min": -0.10326258093118668,
31
+ "max": 0.3394848704338074,
32
  "count": 33
33
  },
34
  "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
35
+ "value": 73.86730194091797,
36
+ "min": -24.886281967163086,
37
+ "max": 86.90812683105469,
38
  "count": 33
39
  },
40
  "Pyramids.Policy.RndValueEstimate.mean": {
41
+ "value": 0.002211504615843296,
42
+ "min": -0.003026550868526101,
43
+ "max": 0.19428789615631104,
44
  "count": 33
45
  },
46
  "Pyramids.Policy.RndValueEstimate.sum": {
47
+ "value": 0.5705682039260864,
48
+ "min": -0.753611147403717,
49
+ "max": 46.04623031616211,
50
  "count": 33
51
  },
52
  "Pyramids.Losses.PolicyLoss.mean": {
53
+ "value": 0.06974054059003071,
54
+ "min": 0.06547258155443982,
55
+ "max": 0.07400301405891577,
56
  "count": 33
57
  },
58
  "Pyramids.Losses.PolicyLoss.sum": {
59
+ "value": 1.0461081088504607,
60
+ "min": 0.4902440781697387,
61
+ "max": 1.0932508326813524,
62
  "count": 33
63
  },
64
  "Pyramids.Losses.ValueLoss.mean": {
65
+ "value": 0.010319827537303808,
66
+ "min": 0.0010141509631897306,
67
+ "max": 0.011688448390607186,
68
  "count": 33
69
  },
70
  "Pyramids.Losses.ValueLoss.sum": {
71
+ "value": 0.1547974130595571,
72
+ "min": 0.013114335913794281,
73
+ "max": 0.16533768097410667,
74
  "count": 33
75
  },
76
  "Pyramids.Policy.LearningRate.mean": {
77
+ "value": 7.47305750901333e-06,
78
+ "min": 7.47305750901333e-06,
79
+ "max": 0.00029515063018788575,
80
  "count": 33
81
  },
82
  "Pyramids.Policy.LearningRate.sum": {
83
+ "value": 0.00011209586263519996,
84
+ "min": 0.00011209586263519996,
85
+ "max": 0.003633732788755799,
86
  "count": 33
87
  },
88
  "Pyramids.Policy.Epsilon.mean": {
89
+ "value": 0.10249098666666664,
90
+ "min": 0.10249098666666664,
91
+ "max": 0.19838354285714285,
92
  "count": 33
93
  },
94
  "Pyramids.Policy.Epsilon.sum": {
95
+ "value": 1.5373647999999998,
96
+ "min": 1.371456,
97
+ "max": 2.6112442000000007,
98
  "count": 33
99
  },
100
  "Pyramids.Policy.Beta.mean": {
101
+ "value": 0.00025884956799999997,
102
+ "min": 0.00025884956799999997,
103
+ "max": 0.00983851593142857,
104
  "count": 33
105
  },
106
  "Pyramids.Policy.Beta.sum": {
107
+ "value": 0.0038827435199999994,
108
+ "min": 0.0038827435199999994,
109
+ "max": 0.12114329557999999,
110
  "count": 33
111
  },
112
  "Pyramids.Losses.RNDLoss.mean": {
113
+ "value": 0.010354802943766117,
114
+ "min": 0.010210125707089901,
115
+ "max": 0.45836424827575684,
116
  "count": 33
117
  },
118
  "Pyramids.Losses.RNDLoss.sum": {
119
+ "value": 0.15532204508781433,
120
+ "min": 0.132731631398201,
121
+ "max": 3.208549737930298,
122
  "count": 33
123
  },
124
  "Pyramids.Environment.EpisodeLength.mean": {
125
+ "value": 525.5192307692307,
126
+ "min": 495.7049180327869,
127
+ "max": 999.0,
128
  "count": 33
129
  },
130
  "Pyramids.Environment.EpisodeLength.sum": {
131
+ "value": 27327.0,
132
+ "min": 15984.0,
133
+ "max": 33967.0,
134
  "count": 33
135
  },
136
  "Pyramids.Environment.CumulativeReward.mean": {
137
+ "value": 1.0897307371577392,
138
+ "min": -1.0000000521540642,
139
+ "max": 1.1325817946683276,
140
  "count": 33
141
  },
142
  "Pyramids.Environment.CumulativeReward.sum": {
143
+ "value": 56.665998332202435,
144
+ "min": -29.099601708352566,
145
+ "max": 67.75399824976921,
146
  "count": 33
147
  },
148
  "Pyramids.Policy.ExtrinsicReward.mean": {
149
+ "value": 1.0897307371577392,
150
+ "min": -1.0000000521540642,
151
+ "max": 1.1325817946683276,
152
  "count": 33
153
  },
154
  "Pyramids.Policy.ExtrinsicReward.sum": {
155
+ "value": 56.665998332202435,
156
+ "min": -29.099601708352566,
157
+ "max": 67.75399824976921,
158
  "count": 33
159
  },
160
  "Pyramids.Policy.RndReward.mean": {
161
+ "value": 0.056008590235321135,
162
+ "min": 0.056008590235321135,
163
+ "max": 8.996327891945839,
164
  "count": 33
165
  },
166
  "Pyramids.Policy.RndReward.sum": {
167
+ "value": 2.912446692236699,
168
+ "min": 2.912446692236699,
169
+ "max": 143.94124627113342,
170
  "count": 33
171
  },
172
  "Pyramids.IsTraining.mean": {
 
184
  },
185
  "metadata": {
186
  "timer_format_version": "0.1.0",
187
+ "start_time_seconds": "1674421870",
188
  "python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
189
+ "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training2 --no-graphics",
190
  "mlagents_version": "0.29.0.dev0",
191
  "mlagents_envs_version": "0.29.0.dev0",
192
  "communication_protocol_version": "1.5.0",
193
  "pytorch_version": "1.8.1+cu102",
194
  "numpy_version": "1.21.6",
195
+ "end_time_seconds": "1674423853"
196
  },
197
+ "total": 1983.170290858,
198
  "count": 1,
199
+ "self": 0.4334071639996182,
200
  "children": {
201
  "run_training.setup": {
202
+ "total": 0.10060317799980112,
203
  "count": 1,
204
+ "self": 0.10060317799980112
205
  },
206
  "TrainerController.start_learning": {
207
+ "total": 1982.6362805160006,
208
  "count": 1,
209
+ "self": 1.353520445984941,
210
  "children": {
211
  "TrainerController._reset_env": {
212
+ "total": 5.930512423999971,
213
  "count": 1,
214
+ "self": 5.930512423999971
215
  },
216
  "TrainerController.advance": {
217
+ "total": 1975.2708701840156,
218
+ "count": 63585,
219
+ "self": 1.3766213879198403,
220
  "children": {
221
  "env_step": {
222
+ "total": 1327.3217640259427,
223
+ "count": 63585,
224
+ "self": 1217.1686995927785,
225
  "children": {
226
  "SubprocessEnvManager._take_step": {
227
+ "total": 109.33425272204022,
228
+ "count": 63585,
229
+ "self": 4.618678569950134,
230
  "children": {
231
  "TorchPolicy.evaluate": {
232
+ "total": 104.71557415209008,
233
  "count": 62570,
234
+ "self": 35.004307358019105,
235
  "children": {
236
  "TorchPolicy.sample_actions": {
237
+ "total": 69.71126679407098,
238
  "count": 62570,
239
+ "self": 69.71126679407098
240
  }
241
  }
242
  }
243
  }
244
  },
245
  "workers": {
246
+ "total": 0.818811711123999,
247
+ "count": 63585,
248
  "self": 0.0,
249
  "children": {
250
  "worker_root": {
251
+ "total": 1977.8371454249473,
252
+ "count": 63585,
253
  "is_parallel": true,
254
+ "self": 863.7658797191134,
255
  "children": {
256
  "run_training.setup": {
257
  "total": 0.0,
 
260
  "self": 0.0,
261
  "children": {
262
  "steps_from_proto": {
263
+ "total": 0.002054730999589083,
264
  "count": 1,
265
  "is_parallel": true,
266
+ "self": 0.0007944589970065863,
267
  "children": {
268
  "_process_rank_one_or_two_observation": {
269
+ "total": 0.0012602720025824965,
270
  "count": 8,
271
  "is_parallel": true,
272
+ "self": 0.0012602720025824965
273
  }
274
  }
275
  },
276
  "UnityEnvironment.step": {
277
+ "total": 0.048239406999528,
278
  "count": 1,
279
  "is_parallel": true,
280
+ "self": 0.0004901860002064495,
281
  "children": {
282
  "UnityEnvironment._generate_step_input": {
283
+ "total": 0.00045722699996986194,
284
  "count": 1,
285
  "is_parallel": true,
286
+ "self": 0.00045722699996986194
287
  },
288
  "communicator.exchange": {
289
+ "total": 0.045628173999830324,
290
  "count": 1,
291
  "is_parallel": true,
292
+ "self": 0.045628173999830324
293
  },
294
  "steps_from_proto": {
295
+ "total": 0.0016638199995213654,
296
  "count": 1,
297
  "is_parallel": true,
298
+ "self": 0.0004661449993363931,
299
  "children": {
300
  "_process_rank_one_or_two_observation": {
301
+ "total": 0.0011976750001849723,
302
  "count": 8,
303
  "is_parallel": true,
304
+ "self": 0.0011976750001849723
305
  }
306
  }
307
  }
 
310
  }
311
  },
312
  "UnityEnvironment.step": {
313
+ "total": 1114.0712657058339,
314
+ "count": 63584,
315
  "is_parallel": true,
316
+ "self": 28.284180096286946,
317
  "children": {
318
  "UnityEnvironment._generate_step_input": {
319
+ "total": 23.93000472285894,
320
+ "count": 63584,
321
  "is_parallel": true,
322
+ "self": 23.93000472285894
323
  },
324
  "communicator.exchange": {
325
+ "total": 956.1459612529579,
326
+ "count": 63584,
327
  "is_parallel": true,
328
+ "self": 956.1459612529579
329
  },
330
  "steps_from_proto": {
331
+ "total": 105.71111963373005,
332
+ "count": 63584,
333
  "is_parallel": true,
334
+ "self": 23.549288799286842,
335
  "children": {
336
  "_process_rank_one_or_two_observation": {
337
+ "total": 82.16183083444321,
338
+ "count": 508672,
339
  "is_parallel": true,
340
+ "self": 82.16183083444321
341
  }
342
  }
343
  }
 
350
  }
351
  },
352
  "trainer_advance": {
353
+ "total": 646.5724847701531,
354
+ "count": 63585,
355
+ "self": 2.5010763138561742,
356
  "children": {
357
  "process_trajectory": {
358
+ "total": 150.38760613227169,
359
+ "count": 63585,
360
+ "self": 150.19726568227088,
361
  "children": {
362
  "RLTrainer._checkpoint": {
363
+ "total": 0.19034045000080368,
364
  "count": 2,
365
+ "self": 0.19034045000080368
366
  }
367
  }
368
  },
369
  "_update_policy": {
370
+ "total": 493.68380232402524,
371
+ "count": 453,
372
+ "self": 183.87012862614483,
373
  "children": {
374
  "TorchPPOOptimizer.update": {
375
+ "total": 309.8136736978804,
376
+ "count": 22758,
377
+ "self": 309.8136736978804
378
  }
379
  }
380
  }
 
383
  }
384
  },
385
  "trainer_threads": {
386
+ "total": 9.640007192501798e-07,
387
  "count": 1,
388
+ "self": 9.640007192501798e-07
389
  },
390
  "TrainerController._save_models": {
391
+ "total": 0.08137649799937208,
392
  "count": 1,
393
+ "self": 0.001334700998995686,
394
  "children": {
395
  "RLTrainer._checkpoint": {
396
+ "total": 0.08004179700037639,
397
  "count": 1,
398
+ "self": 0.08004179700037639
399
  }
400
  }
401
  }
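
Note: run_logs/timers.json is the profiler dump for the run — a tree of timer nodes, each recording total, self, count, and optional children (the gauges block at the top holds the metric summaries compared above). A minimal sketch, assuming the file is read from run_logs/, that prints the tree as an indented list:

```python
# Flatten the nested timer tree (total/self/count/children per node).
import json

def walk(name, node, depth=0):
    if isinstance(node, dict) and "total" in node:
        print(f'{"  " * depth}{name}: total={node["total"]:.2f}s count={node.get("count")}')
        for child_name, child in node.get("children", {}).items():
            walk(child_name, child, depth + 1)

with open("run_logs/timers.json") as f:
    root = json.load(f)

walk(root.get("name", "root"), root)
```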
run_logs/training_status.json CHANGED
@@ -2,40 +2,40 @@
2
  "Pyramids": {
3
  "checkpoints": [
4
  {
5
- "steps": 499946,
6
- "file_path": "results/Pyramids Training/Pyramids/Pyramids-499946.onnx",
7
- "reward": 1.6246666461229324,
8
- "creation_time": 1674419154.5548272,
9
  "auxillary_file_paths": [
10
- "results/Pyramids Training/Pyramids/Pyramids-499946.pt"
11
  ]
12
  },
13
  {
14
- "steps": 999879,
15
- "file_path": "results/Pyramids Training/Pyramids/Pyramids-999879.onnx",
16
- "reward": 1.5129999443888664,
17
- "creation_time": 1674420181.2846541,
18
  "auxillary_file_paths": [
19
- "results/Pyramids Training/Pyramids/Pyramids-999879.pt"
20
  ]
21
  },
22
  {
23
- "steps": 1000007,
24
- "file_path": "results/Pyramids Training/Pyramids/Pyramids-1000007.onnx",
25
- "reward": 1.5129999443888664,
26
- "creation_time": 1674420181.3795576,
27
  "auxillary_file_paths": [
28
- "results/Pyramids Training/Pyramids/Pyramids-1000007.pt"
29
  ]
30
  }
31
  ],
32
  "final_checkpoint": {
33
- "steps": 1000007,
34
- "file_path": "results/Pyramids Training/Pyramids.onnx",
35
- "reward": 1.5129999443888664,
36
- "creation_time": 1674420181.3795576,
37
  "auxillary_file_paths": [
38
- "results/Pyramids Training/Pyramids/Pyramids-1000007.pt"
39
  ]
40
  }
41
  },
 
2
  "Pyramids": {
3
  "checkpoints": [
4
  {
5
+ "steps": 499989,
6
+ "file_path": "results/Pyramids Training2/Pyramids/Pyramids-499989.onnx",
7
+ "reward": 0.07259994745254517,
8
+ "creation_time": 1674422823.8779445,
9
  "auxillary_file_paths": [
10
+ "results/Pyramids Training2/Pyramids/Pyramids-499989.pt"
11
  ]
12
  },
13
  {
14
+ "steps": 999983,
15
+ "file_path": "results/Pyramids Training2/Pyramids/Pyramids-999983.onnx",
16
+ "reward": 1.6389999687671661,
17
+ "creation_time": 1674423852.8117085,
18
  "auxillary_file_paths": [
19
+ "results/Pyramids Training2/Pyramids/Pyramids-999983.pt"
20
  ]
21
  },
22
  {
23
+ "steps": 1000111,
24
+ "file_path": "results/Pyramids Training2/Pyramids/Pyramids-1000111.onnx",
25
+ "reward": 1.6389999687671661,
26
+ "creation_time": 1674423852.9078035,
27
  "auxillary_file_paths": [
28
+ "results/Pyramids Training2/Pyramids/Pyramids-1000111.pt"
29
  ]
30
  }
31
  ],
32
  "final_checkpoint": {
33
+ "steps": 1000111,
34
+ "file_path": "results/Pyramids Training2/Pyramids.onnx",
35
+ "reward": 1.6389999687671661,
36
+ "creation_time": 1674423852.9078035,
37
  "auxillary_file_paths": [
38
+ "results/Pyramids Training2/Pyramids/Pyramids-1000111.pt"
39
  ]
40
  }
41
  },
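
Note: run_logs/training_status.json tracks the checkpoints ML-Agents kept for this run; the step counts recorded here (499989, 999983, 1000111) are what drive the .onnx/.pt renames above. A minimal sketch, assuming the file is read from run_logs/, to list them:

```python
# List the checkpoints recorded for the Pyramids behavior (steps, reward, path).
import json

with open("run_logs/training_status.json") as f:
    status = json.load(f)

for ckpt in status["Pyramids"]["checkpoints"]:
    print(ckpt["steps"], ckpt["reward"], ckpt["file_path"])
print("final:", status["Pyramids"]["final_checkpoint"]["file_path"])
```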