ahmadsy committed
Commit 9208de8 · verified · 1 Parent(s): 5d0e0b7

First Push
SnowballTarget.onnx CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:50d1f81df84008779548858598dcb8c892f96b6c482a5881b3a3ea24ba84759d
+ oid sha256:d9a557a4a6a87a7f178c9f3df20cdd0fdd465ad99fe574bc4c4c215878f928e2
  size 650646
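
These entries are Git LFS pointer files rather than the model binaries themselves: each stores only the sha256 oid and byte size of the tracked object, so the diff simply swaps one oid for another. A minimal Python sketch for checking a locally pulled file against its pointer (the local path is an assumption; the same check applies to every .onnx/.pt pointer below):

import hashlib

def lfs_sha256(path: str) -> str:
    """Stream the file and return its sha256 hex digest (what the LFS 'oid' field stores)."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local path after `git lfs pull`; compare against the new oid above.
print(lfs_sha256("SnowballTarget.onnx") ==
      "d9a557a4a6a87a7f178c9f3df20cdd0fdd465ad99fe574bc4c4c215878f928e2")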
SnowballTarget/SnowballTarget-406824.onnx CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:50d1f81df84008779548858598dcb8c892f96b6c482a5881b3a3ea24ba84759d
+ oid sha256:0a1add58d5d5336a20f8124dce73dc85d62fba498234114d6fa0cd47a93b2f0c
  size 650646
SnowballTarget/SnowballTarget-406824.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f7d54bc65bc6d216763cef9d858ea3726778608abccecfcea104f965845cf90a
+ oid sha256:615b9100a2e0b784cfdee94c3a6c36da2d7306bcabf9be0477a4347d5bbaadb2
  size 3849115
SnowballTarget/SnowballTarget-449968.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:367eb8d92437e393c3bec87882081ccc143d0c967a13f7f04318d9536800d863
+ size 650646
SnowballTarget/SnowballTarget-449968.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b9d065475cbc1897a26ae0070ca423e867d206649013363c2cd7b7fad693d4c
+ size 3849115
SnowballTarget/SnowballTarget-499992.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ccb1707a5de514778334eaff2320ae90d28f8250f9b352a5066afa647477768
+ size 650646
SnowballTarget/SnowballTarget-499992.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ecb39cf3a1a5100a7837a8f252f9e3c1613868a46f8887dafd97522c887b0e4e
+ size 3849115
SnowballTarget/SnowballTarget-549952.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67dbdcb747bacd4dad239c55aa8625f8f7df105645de23e955b5c0802b68c86b
+ size 650646
SnowballTarget/SnowballTarget-549952.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba3619dc3a0a88440c37ad89170dbf03e43a62ca247069571bc48190ffe27bdc
+ size 3849115
SnowballTarget/SnowballTarget-599952.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3f03885e398e0beba36e89a64555589229919b59a5695ab4f261d47c9d7bedb
+ size 650646
SnowballTarget/SnowballTarget-599952.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:413c75b77901851d4bb9d7edd06757391aa6d270dae25409642a5cd0dc57152e
+ size 3849115
SnowballTarget/SnowballTarget-649976.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cca34e2cbbf96a0d5b682facfb4fd01ee1d69e324bd950e3330587dc850d8f17
+ size 650646
SnowballTarget/SnowballTarget-649976.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d20cd0ab936b17fe132e07597dd86639278fac33d1caff09c7b17ac67b639651
+ size 3849115
SnowballTarget/SnowballTarget-698632.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9a557a4a6a87a7f178c9f3df20cdd0fdd465ad99fe574bc4c4c215878f928e2
+ size 650646
SnowballTarget/SnowballTarget-698632.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb723ca1ae17e8fa0279bda651590fb8b2a5fcf701f8d337eaeb82fde5b10e6d
+ size 3849115
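
The .onnx checkpoints listed above are the exported ML-Agents policies. Once the LFS objects have been pulled, they can be opened with onnxruntime to confirm the graph loads and to inspect its inputs and outputs; a hedged sketch (onnxruntime and the local path are assumptions, not something this commit pins):

import onnxruntime as ort

# Hypothetical local path to one of the exported checkpoints listed above.
sess = ort.InferenceSession("SnowballTarget/SnowballTarget-698632.onnx")

# Print the graph's input/output names and shapes; these depend on the
# observation/action spec, so they are inspected rather than assumed here.
for inp in sess.get_inputs():
    print("input ", inp.name, inp.shape, inp.type)
for out in sess.get_outputs():
    print("output", out.name, out.shape, out.type)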
SnowballTarget/checkpoint.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9a4139d7bcbad8fa860295d1954d6783b75c91d765024afa6fe150b84f9a090b
+ oid sha256:b1e635bb89b61b42cf4e282b609e50d51c2a3a229c93d22c1ec6d00f2b7ce4a6
  size 3848290
SnowballTarget/events.out.tfevents.1731517845.e54276972864.12362.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:696372458c21944a251baa7b22e980964af1692c96e3b679ff119a4976b81553
+ size 1114
SnowballTarget/events.out.tfevents.1731517926.e54276972864.12739.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08e4cfe33516ab6e4547a190efaf75ff27706d6fc0509cc9c877764bdfc8c399
+ size 1114
SnowballTarget/events.out.tfevents.1731518014.e54276972864.13148.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:191d200ebecb87aed24e7d5d55e74a6f43cd1903d5079b19241d4f05f81f9842
+ size 24696
run_logs/Player-0.log CHANGED
@@ -31,7 +31,7 @@ ALSA lib pcm.c:2664:(snd_pcm_open_noupdate) Unknown PCM default
  FMOD failed to initialize the output device.: "Error initializing output device. " (60)
  FMOD initialized on nosound output
  Begin MonoManager ReloadAssembly
- - Completed reload, in 0.128 seconds
+ - Completed reload, in 0.162 seconds
  ERROR: Shader Sprites/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  ERROR: Shader Sprites/Mask shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  ERROR: Shader Legacy Shaders/VertexLit shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
@@ -42,7 +42,7 @@ ERROR: Shader Standard shader is not supported on this GPU (none of subshaders/f
  WARNING: Shader Unsupported: 'Standard' - All subshaders removed
  WARNING: Shader Did you use #pragma only_renderers and omit this platform?
  WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
- UnloadTime: 0.869375 ms
+ UnloadTime: 1.104166 ms
  ERROR: Shader UI/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  requesting resize 84 x 84
  Setting up 1 worker threads for Enlighten.
@@ -50,7 +50,7 @@ Memory Statistics:
  [ALLOC_TEMP_TLS] TLS Allocator
  StackAllocators :
  [ALLOC_TEMP_MAIN]
- Peak usage frame count: [8.0 KB-16.0 KB]: 5557 frames, [16.0 KB-32.0 KB]: 94 frames, [2.0 MB-4.0 MB]: 1 frames
+ Peak usage frame count: [8.0 KB-16.0 KB]: 7828 frames, [16.0 KB-32.0 KB]: 132 frames, [2.0 MB-4.0 MB]: 1 frames
  Initial Block Size 4.0 MB
  Current Block Size 4.0 MB
  Peak Allocated Bytes 2.0 MB
@@ -166,22 +166,22 @@ Memory Statistics:
  Peak Allocated Bytes 0 B
  Overflow Count 0
  [ALLOC_DEFAULT] Dual Thread Allocator
- Peak main deferred allocation count 36
+ Peak main deferred allocation count 37
  [ALLOC_BUCKET]
  Large Block size 4.0 MB
  Used Block count 1
  Peak Allocated bytes 1.0 MB
  [ALLOC_DEFAULT_MAIN]
- Peak usage frame count: [4.0 MB-8.0 MB]: 5652 frames
+ Peak usage frame count: [4.0 MB-8.0 MB]: 7961 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
- Peak Allocated memory 7.2 MB
+ Peak Allocated memory 7.4 MB
  Peak Large allocation bytes 0 B
  [ALLOC_DEFAULT_THREAD]
- Peak usage frame count: [16.0 MB-32.0 MB]: 5652 frames
+ Peak usage frame count: [16.0 MB-32.0 MB]: 7961 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
- Peak Allocated memory 17.4 MB
+ Peak Allocated memory 17.8 MB
  Peak Large allocation bytes 16.0 MB
  [ALLOC_TEMP_JOB_1_FRAME]
  Initial Block Size 2.0 MB
@@ -210,13 +210,13 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 1.0 MB
  [ALLOC_GFX_MAIN]
- Peak usage frame count: [32.0 KB-64.0 KB]: 5470 frames, [64.0 KB-128.0 KB]: 182 frames
+ Peak usage frame count: [32.0 KB-64.0 KB]: 7771 frames, [64.0 KB-128.0 KB]: 190 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
  Peak Allocated memory 66.0 KB
  Peak Large allocation bytes 0 B
  [ALLOC_GFX_THREAD]
- Peak usage frame count: [32.0 KB-64.0 KB]: 5652 frames
+ Peak usage frame count: [32.0 KB-64.0 KB]: 7961 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
  Peak Allocated memory 39.6 KB
@@ -228,13 +228,13 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 1.0 MB
  [ALLOC_CACHEOBJECTS_MAIN]
- Peak usage frame count: [0.5 MB-1.0 MB]: 5652 frames
+ Peak usage frame count: [0.5 MB-1.0 MB]: 7961 frames
  Requested Block Size 4.0 MB
  Peak Block count 1
  Peak Allocated memory 0.6 MB
  Peak Large allocation bytes 0 B
  [ALLOC_CACHEOBJECTS_THREAD]
- Peak usage frame count: [0.5 MB-1.0 MB]: 5651 frames, [2.0 MB-4.0 MB]: 1 frames
+ Peak usage frame count: [0.5 MB-1.0 MB]: 7960 frames, [2.0 MB-4.0 MB]: 1 frames
  Requested Block Size 4.0 MB
  Peak Block count 1
  Peak Allocated memory 2.2 MB
@@ -246,13 +246,13 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 1.0 MB
  [ALLOC_TYPETREE_MAIN]
- Peak usage frame count: [0-1.0 KB]: 5652 frames
+ Peak usage frame count: [0-1.0 KB]: 7961 frames
  Requested Block Size 2.0 MB
  Peak Block count 1
  Peak Allocated memory 1.0 KB
  Peak Large allocation bytes 0 B
  [ALLOC_TYPETREE_THREAD]
- Peak usage frame count: [1.0 KB-2.0 KB]: 5652 frames
+ Peak usage frame count: [1.0 KB-2.0 KB]: 7961 frames
  Requested Block Size 2.0 MB
  Peak Block count 1
  Peak Allocated memory 1.7 KB
run_logs/timers.json CHANGED
@@ -2,153 +2,153 @@
  "name": "root",
  "gauges": {
  "SnowballTarget.Policy.Entropy.mean": {
- "value": 0.8013571500778198,
- "min": 0.8013571500778198,
- "max": 1.0774190425872803,
- "count": 20
+ "value": 0.6453768610954285,
+ "min": 0.6052058935165405,
+ "max": 0.8181990385055542,
+ "count": 29
  },
  "SnowballTarget.Policy.Entropy.sum": {
- "value": 8162.6240234375,
- "min": 8047.19482421875,
- "max": 11045.7001953125,
- "count": 20
+ "value": 6559.6103515625,
+ "min": 2979.062744140625,
+ "max": 8338.6083984375,
+ "count": 29
  },
  "SnowballTarget.Step.mean": {
- "value": 399968.0,
- "min": 209936.0,
- "max": 399968.0,
- "count": 20
+ "value": 689960.0,
+ "min": 409984.0,
+ "max": 689960.0,
+ "count": 29
  },
  "SnowballTarget.Step.sum": {
- "value": 399968.0,
- "min": 209936.0,
- "max": 399968.0,
- "count": 20
+ "value": 689960.0,
+ "min": 409984.0,
+ "max": 689960.0,
+ "count": 29
  },
  "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
- "value": 13.542168617248535,
- "min": 12.237586975097656,
- "max": 13.542168617248535,
- "count": 20
+ "value": 13.772141456604004,
+ "min": 13.346248626708984,
+ "max": 13.857466697692871,
+ "count": 29
  },
  "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
- "value": 2640.722900390625,
- "min": 2349.61669921875,
- "max": 2775.94580078125,
- "count": 20
+ "value": 2685.567626953125,
+ "min": 799.4398803710938,
+ "max": 2823.4833984375,
+ "count": 29
  },
  "SnowballTarget.Environment.EpisodeLength.mean": {
  "value": 199.0,
  "min": 199.0,
  "max": 199.0,
- "count": 20
+ "count": 29
  },
  "SnowballTarget.Environment.EpisodeLength.sum": {
  "value": 8756.0,
- "min": 8756.0,
+ "min": 2189.0,
  "max": 10945.0,
- "count": 20
+ "count": 29
  },
  "SnowballTarget.Losses.PolicyLoss.mean": {
- "value": 0.07711479002076278,
- "min": 0.06354847396318755,
- "max": 0.07711479002076278,
- "count": 20
+ "value": 0.06500108055679091,
+ "min": 0.0592020819404199,
+ "max": 0.08497456358679756,
+ "count": 29
  },
  "SnowballTarget.Losses.PolicyLoss.sum": {
- "value": 0.30845916008305113,
- "min": 0.267555901117043,
- "max": 0.376703390646178,
- "count": 20
+ "value": 0.26000432222716363,
+ "min": 0.08497456358679756,
+ "max": 0.3639923398230024,
+ "count": 29
  },
  "SnowballTarget.Losses.ValueLoss.mean": {
- "value": 0.1769421209088143,
- "min": 0.17419704251429616,
- "max": 0.24451156159241996,
- "count": 20
+ "value": 0.15492069965922364,
+ "min": 0.1400732405337633,
+ "max": 0.19679481974419424,
+ "count": 29
  },
  "SnowballTarget.Losses.ValueLoss.sum": {
- "value": 0.7077684836352572,
- "min": 0.7016009970330725,
- "max": 1.2225578079620998,
- "count": 20
+ "value": 0.6196827986368946,
+ "min": 0.16414106776937842,
+ "max": 0.9178970949906928,
+ "count": 29
  },
  "SnowballTarget.Policy.LearningRate.mean": {
- "value": 0.0001815828394724,
- "min": 0.0001815828394724,
- "max": 0.0002383428205524,
- "count": 20
+ "value": 9.44892685036e-05,
+ "min": 9.44892685036e-05,
+ "max": 0.00017731924089360002,
+ "count": 29
  },
  "SnowballTarget.Policy.LearningRate.sum": {
- "value": 0.0007263313578896,
- "min": 0.0007263313578896,
- "max": 0.001176864107712,
- "count": 20
+ "value": 0.0003779570740144,
+ "min": 0.00017731924089360002,
+ "max": 0.000876696207768,
+ "count": 29
  },
  "SnowballTarget.Policy.Epsilon.mean": {
- "value": 0.16052760000000005,
- "min": 0.16052760000000005,
- "max": 0.1794476,
- "count": 20
+ "value": 0.1314964,
+ "min": 0.1314964,
+ "max": 0.1591064,
+ "count": 29
  },
  "SnowballTarget.Policy.Epsilon.sum": {
- "value": 0.6421104000000002,
- "min": 0.6421104000000002,
- "max": 0.892288,
- "count": 20
+ "value": 0.5259856,
+ "min": 0.1591064,
+ "max": 0.7922319999999999,
+ "count": 29
  },
  "SnowballTarget.Policy.Beta.mean": {
- "value": 0.0030303272400000005,
- "min": 0.0030303272400000005,
- "max": 0.00397443524,
- "count": 20
+ "value": 0.0015816703600000004,
+ "min": 0.0015816703600000004,
+ "max": 0.0029594093600000004,
+ "count": 29
  },
  "SnowballTarget.Policy.Beta.sum": {
- "value": 0.012121308960000002,
- "min": 0.012121308960000002,
- "max": 0.0196251712,
- "count": 20
+ "value": 0.0063266814400000015,
+ "min": 0.0029594093600000004,
+ "max": 0.014632376800000003,
+ "count": 29
  },
  "SnowballTarget.Environment.CumulativeReward.mean": {
- "value": 26.65909090909091,
- "min": 25.09090909090909,
- "max": 27.272727272727273,
- "count": 20
+ "value": 27.09090909090909,
+ "min": 25.59090909090909,
+ "max": 27.181818181818183,
+ "count": 29
  },
  "SnowballTarget.Environment.CumulativeReward.sum": {
- "value": 1173.0,
- "min": 1105.0,
- "max": 1459.0,
- "count": 20
+ "value": 1192.0,
+ "min": 282.0,
+ "max": 1489.0,
+ "count": 29
  },
  "SnowballTarget.Policy.ExtrinsicReward.mean": {
- "value": 26.65909090909091,
- "min": 25.09090909090909,
- "max": 27.272727272727273,
- "count": 20
+ "value": 27.09090909090909,
+ "min": 25.59090909090909,
+ "max": 27.181818181818183,
+ "count": 29
  },
  "SnowballTarget.Policy.ExtrinsicReward.sum": {
- "value": 1173.0,
- "min": 1105.0,
- "max": 1459.0,
- "count": 20
+ "value": 1192.0,
+ "min": 282.0,
+ "max": 1489.0,
+ "count": 29
  },
  "SnowballTarget.IsTraining.mean": {
  "value": 1.0,
  "min": 1.0,
  "max": 1.0,
- "count": 20
+ "count": 29
  },
  "SnowballTarget.IsTraining.sum": {
  "value": 1.0,
  "min": 1.0,
  "max": 1.0,
- "count": 20
+ "count": 29
  }
  },
  "metadata": {
  "timer_format_version": "0.1.0",
- "start_time_seconds": "1731516261",
+ "start_time_seconds": "1731518013",
  "python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
  "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --resume --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
  "mlagents_version": "1.2.0.dev0",
@@ -156,59 +156,59 @@
  "communication_protocol_version": "1.5.0",
  "pytorch_version": "2.5.0+cu121",
  "numpy_version": "1.23.5",
- "end_time_seconds": "1731516857"
+ "end_time_seconds": "1731518853"
  },
- "total": 596.407235425,
+ "total": 840.429411397,
  "count": 1,
- "self": 0.006777129999818499,
+ "self": 0.36528794899959394,
  "children": {
  "run_training.setup": {
- "total": 0.07258270100010122,
+ "total": 0.11104295700033617,
  "count": 1,
- "self": 0.07258270100010122
+ "self": 0.11104295700033617
  },
  "TrainerController.start_learning": {
- "total": 596.327875594,
+ "total": 839.953080491,
  "count": 1,
- "self": 1.8854217600032825,
+ "self": 1.281243647989868,
  "children": {
  "TrainerController._reset_env": {
- "total": 2.593605476999983,
+ "total": 3.2445078089999697,
  "count": 1,
- "self": 2.593605476999983
+ "self": 3.2445078089999697
  },
  "TrainerController.advance": {
- "total": 591.7501947359965,
- "count": 18841,
- "self": 0.432665502014288,
+ "total": 835.2928897720103,
+ "count": 26535,
+ "self": 0.6298937230021693,
  "children": {
  "env_step": {
- "total": 591.3175292339822,
- "count": 18841,
- "self": 452.74357440195354,
+ "total": 834.6629960490081,
+ "count": 26535,
+ "self": 640.1278579970003,
  "children": {
  "SubprocessEnvManager._take_step": {
- "total": 138.1349641260124,
- "count": 18841,
- "self": 2.1363447949970578,
+ "total": 193.92481113003896,
+ "count": 26535,
+ "self": 2.9560653630492197,
  "children": {
  "TorchPolicy.evaluate": {
- "total": 135.99861933101533,
- "count": 18841,
- "self": 135.99861933101533
+ "total": 190.96874576698974,
+ "count": 26535,
+ "self": 190.96874576698974
  }
  }
  },
  "workers": {
- "total": 0.4389907060162841,
- "count": 18840,
+ "total": 0.6103269219688627,
+ "count": 26534,
  "self": 0.0,
  "children": {
  "worker_root": {
- "total": 593.5683008930121,
- "count": 18840,
+ "total": 837.2221901299704,
+ "count": 26534,
  "is_parallel": true,
- "self": 282.92222518100016,
+ "self": 405.7934578399522,
  "children": {
  "run_training.setup": {
  "total": 0.0,
@@ -217,48 +217,48 @@
  "self": 0.0,
  "children": {
  "steps_from_proto": {
- "total": 0.0032678869999926974,
+ "total": 0.0037471790001291083,
  "count": 1,
  "is_parallel": true,
- "self": 0.001032716000054279,
+ "self": 0.0010018210005000583,
  "children": {
  "_process_rank_one_or_two_observation": {
- "total": 0.0022351709999384184,
+ "total": 0.00274535799962905,
  "count": 10,
  "is_parallel": true,
- "self": 0.0022351709999384184
+ "self": 0.00274535799962905
  }
  }
  },
  "UnityEnvironment.step": {
- "total": 0.08936316199992689,
+ "total": 0.07911048000005394,
  "count": 1,
  "is_parallel": true,
- "self": 0.0007668390001072112,
+ "self": 0.0007877650000409631,
  "children": {
  "UnityEnvironment._generate_step_input": {
- "total": 0.000505100999816932,
+ "total": 0.0004636660000869597,
  "count": 1,
  "is_parallel": true,
- "self": 0.000505100999816932
+ "self": 0.0004636660000869597
  },
  "communicator.exchange": {
- "total": 0.08585629300000619,
+ "total": 0.07536837199995716,
  "count": 1,
  "is_parallel": true,
- "self": 0.08585629300000619
+ "self": 0.07536837199995716
  },
  "steps_from_proto": {
- "total": 0.002234928999996555,
+ "total": 0.002490676999968855,
  "count": 1,
  "is_parallel": true,
- "self": 0.00045526099961534783,
+ "self": 0.0005000929995730985,
  "children": {
  "_process_rank_one_or_two_observation": {
- "total": 0.001779668000381207,
+ "total": 0.0019905840003957564,
  "count": 10,
  "is_parallel": true,
- "self": 0.001779668000381207
+ "self": 0.0019905840003957564
  }
  }
  }
@@ -267,34 +267,34 @@
  }
  },
  "UnityEnvironment.step": {
- "total": 310.6460757120119,
- "count": 18839,
+ "total": 431.42873229001816,
+ "count": 26533,
  "is_parallel": true,
- "self": 14.666757987014535,
+ "self": 20.579289500986306,
  "children": {
  "UnityEnvironment._generate_step_input": {
- "total": 7.4711006559819,
- "count": 18839,
+ "total": 10.255141167016063,
+ "count": 26533,
  "is_parallel": true,
- "self": 7.4711006559819
+ "self": 10.255141167016063
  },
  "communicator.exchange": {
- "total": 244.3855100989997,
- "count": 18839,
+ "total": 338.9035427789836,
+ "count": 26533,
  "is_parallel": true,
- "self": 244.3855100989997
+ "self": 338.9035427789836
  },
  "steps_from_proto": {
- "total": 44.12270697001577,
- "count": 18839,
+ "total": 61.6907588430322,
+ "count": 26533,
  "is_parallel": true,
- "self": 8.749952668015112,
+ "self": 12.478091371013306,
  "children": {
  "_process_rank_one_or_two_observation": {
- "total": 35.37275430200066,
- "count": 188390,
+ "total": 49.21266747201889,
+ "count": 265330,
  "is_parallel": true,
- "self": 35.37275430200066
+ "self": 49.21266747201889
  }
  }
  }
@@ -309,9 +309,9 @@
  }
  },
  "trainer_threads": {
- "total": 4.646900015359279e-05,
+ "total": 0.0001709950001895777,
  "count": 1,
- "self": 4.646900015359279e-05,
+ "self": 0.0001709950001895777,
  "children": {
  "thread_root": {
  "total": 0.0,
@@ -320,36 +320,36 @@
  "self": 0.0,
  "children": {
  "trainer_advance": {
- "total": 583.7782663488981,
- "count": 767098,
+ "total": 825.7275560776652,
+ "count": 1094165,
  "is_parallel": true,
- "self": 17.1540841019048,
+ "self": 25.041207345781913,
  "children": {
  "process_trajectory": {
- "total": 310.00087308699267,
- "count": 767099,
+ "total": 433.45666232688745,
+ "count": 1094165,
  "is_parallel": true,
- "self": 309.0049399379932,
+ "self": 432.42008302688737,
  "children": {
  "RLTrainer._checkpoint": {
- "total": 0.9959331489994838,
- "count": 4,
+ "total": 1.036579300000085,
+ "count": 5,
  "is_parallel": true,
- "self": 0.9959331489994838
+ "self": 1.036579300000085
  }
  }
  },
  "_update_policy": {
- "total": 256.62330916000064,
- "count": 93,
+ "total": 367.22968640499585,
+ "count": 132,
  "is_parallel": true,
- "self": 67.2340705220065,
+ "self": 92.73948547499185,
  "children": {
  "TorchPPOOptimizer.update": {
- "total": 189.38923863799414,
- "count": 4773,
+ "total": 274.490200930004,
+ "count": 6729,
  "is_parallel": true,
- "self": 189.38923863799414
+ "self": 274.490200930004
  }
  }
  }
@@ -360,14 +360,14 @@
  }
  },
  "TrainerController._save_models": {
- "total": 0.09860715200011327,
+ "total": 0.13426826699969752,
  "count": 1,
- "self": 0.002142656000160059,
+ "self": 0.0030220349995033757,
  "children": {
  "RLTrainer._checkpoint": {
- "total": 0.09646449599995321,
+ "total": 0.13124623200019414,
  "count": 1,
- "self": 0.09646449599995321
+ "self": 0.13124623200019414
  }
  }
  }
run_logs/training_status.json CHANGED
@@ -2,103 +2,103 @@
  "SnowballTarget": {
  "checkpoints": [
  {
- "steps": 49936,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-49936.onnx",
- "reward": 15.090909090909092,
- "creation_time": 1731515800.1786923,
+ "steps": 399968,
+ "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-399968.onnx",
+ "reward": 27.818181818181817,
+ "creation_time": 1731516836.5577378,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-49936.pt"
+ "results/SnowballTarget1/SnowballTarget/SnowballTarget-399968.pt"
  ]
  },
  {
- "steps": 99960,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-99960.onnx",
- "reward": 18.363636363636363,
- "creation_time": 1731515944.0305312,
+ "steps": 406824,
+ "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-406824.onnx",
+ "reward": null,
+ "creation_time": 1731516857.428955,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-99960.pt"
+ "results/SnowballTarget1/SnowballTarget/SnowballTarget-406824.pt"
  ]
  },
  {
- "steps": 149984,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-149984.onnx",
- "reward": 20.545454545454547,
- "creation_time": 1731516090.745955,
+ "steps": 406824,
+ "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-406824.onnx",
+ "reward": null,
+ "creation_time": 1731517847.9200594,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-149984.pt"
+ "results/SnowballTarget1/SnowballTarget/SnowballTarget-406824.pt"
  ]
  },
  {
- "steps": 199984,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-199984.onnx",
- "reward": 24.272727272727273,
- "creation_time": 1731516233.877326,
+ "steps": 406824,
+ "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-406824.onnx",
+ "reward": null,
+ "creation_time": 1731517927.6961749,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-199984.pt"
+ "results/SnowballTarget1/SnowballTarget/SnowballTarget-406824.pt"
  ]
  },
  {
- "steps": 200112,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-200112.onnx",
- "reward": 24.272727272727273,
- "creation_time": 1731516233.9988563,
+ "steps": 449968,
+ "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-449968.onnx",
+ "reward": 26.545454545454547,
+ "creation_time": 1731518139.0536435,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-200112.pt"
+ "results/SnowballTarget1/SnowballTarget/SnowballTarget-449968.pt"
  ]
  },
  {
- "steps": 249984,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-249984.onnx",
- "reward": 26.09090909090909,
- "creation_time": 1731516404.853397,
+ "steps": 499992,
+ "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-499992.onnx",
+ "reward": 26.818181818181817,
+ "creation_time": 1731518283.4261417,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-249984.pt"
+ "results/SnowballTarget1/SnowballTarget/SnowballTarget-499992.pt"
  ]
  },
  {
- "steps": 299944,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-299944.onnx",
- "reward": 25.727272727272727,
- "creation_time": 1731516547.6171284,
+ "steps": 549952,
+ "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-549952.onnx",
+ "reward": 26.181818181818183,
+ "creation_time": 1731518427.8825192,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-299944.pt"
+ "results/SnowballTarget1/SnowballTarget/SnowballTarget-549952.pt"
  ]
  },
  {
- "steps": 349968,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-349968.onnx",
- "reward": 25.90909090909091,
- "creation_time": 1731516693.8046763,
+ "steps": 599952,
+ "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-599952.onnx",
+ "reward": 26.636363636363637,
+ "creation_time": 1731518570.242367,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-349968.pt"
+ "results/SnowballTarget1/SnowballTarget/SnowballTarget-599952.pt"
  ]
  },
  {
- "steps": 399968,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-399968.onnx",
+ "steps": 649976,
+ "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-649976.onnx",
  "reward": 27.818181818181817,
- "creation_time": 1731516836.5577378,
+ "creation_time": 1731518714.1479263,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-399968.pt"
+ "results/SnowballTarget1/SnowballTarget/SnowballTarget-649976.pt"
  ]
  },
  {
- "steps": 406824,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-406824.onnx",
- "reward": null,
- "creation_time": 1731516857.428955,
+ "steps": 698632,
+ "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-698632.onnx",
+ "reward": 26.545454545454547,
+ "creation_time": 1731518853.3072882,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-406824.pt"
+ "results/SnowballTarget1/SnowballTarget/SnowballTarget-698632.pt"
  ]
  }
  ],
  "final_checkpoint": {
- "steps": 406824,
+ "steps": 698632,
  "file_path": "results/SnowballTarget1/SnowballTarget.onnx",
- "reward": null,
- "creation_time": 1731516857.428955,
+ "reward": 26.545454545454547,
+ "creation_time": 1731518853.3072882,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-406824.pt"
+ "results/SnowballTarget1/SnowballTarget/SnowballTarget-698632.pt"
  ]
  }
  },
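
run_logs/training_status.json likewise records every checkpoint written during the resumed run, so the progression above (399968 through 698632 steps) can be summarized directly. A small sketch under the same local-checkout assumption:

import json

with open("run_logs/training_status.json") as f:
    status = json.load(f)

target = status["SnowballTarget"]
for ckpt in target["checkpoints"]:
    # Reward is null for checkpoints written at shutdown, so it may print as None.
    print(f"steps={ckpt['steps']:>7}  reward={ckpt['reward']}  file={ckpt['file_path']}")

final = target["final_checkpoint"]
print("final:", final["steps"], "steps ->", final["file_path"])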