praneethd7 committed on
Commit 101722a · verified · 1 Parent(s): 373d86e

UTEL-UIUC/ft-aug-mask2former-swin-small-ade-semantic

Files changed (6)
  1. README.md +45 -27
  2. all_results.json +6 -6
  3. model.safetensors +1 -1
  4. train_results.json +6 -6
  5. trainer_state.json +474 -189
  6. training_args.bin +1 -1
README.md CHANGED
@@ -16,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
16
 
17
  This model is a fine-tuned version of [facebook/mask2former-swin-tiny-ade-semantic](https://huggingface.co/facebook/mask2former-swin-tiny-ade-semantic) on an unknown dataset.
18
  It achieves the following results on the evaluation set:
19
- - Loss: 36.8996
20
 
21
  ## Model description
22
 
@@ -43,37 +43,55 @@ The following hyperparameters were used during training:
43
  - lr_scheduler_type: linear
44
  - lr_scheduler_warmup_steps: 4
45
  - num_epochs: 100
46
- - mixed_precision_training: Native AMP
47
 
48
  ### Training results
49
 
50
  | Training Loss | Epoch | Step | Validation Loss |
51
  |:-------------:|:------:|:----:|:---------------:|
52
- | 54.3035 | 0.1408 | 50 | 48.1664 |
53
- | 41.3859 | 0.2817 | 100 | 42.7191 |
54
- | 37.7752 | 0.4225 | 150 | 39.3265 |
55
- | 35.7194 | 0.5634 | 200 | 37.9619 |
56
- | 34.6249 | 0.7042 | 250 | 37.8358 |
57
- | 33.9646 | 0.8451 | 300 | 37.6389 |
58
- | 33.0984 | 0.9859 | 350 | 36.2771 |
59
- | 32.1659 | 1.1268 | 400 | 36.4889 |
60
- | 32.1228 | 1.2676 | 450 | 36.7327 |
61
- | 30.6904 | 1.4085 | 500 | 36.4485 |
62
- | 31.1939 | 1.5493 | 550 | 36.8185 |
63
- | 30.6071 | 1.6901 | 600 | 38.7391 |
64
- | 30.6755 | 1.8310 | 650 | 36.8563 |
65
- | 30.2044 | 1.9718 | 700 | 36.0311 |
66
- | 29.5483 | 2.1127 | 750 | 36.1058 |
67
- | 29.2086 | 2.2535 | 800 | 36.9260 |
68
- | 28.5485 | 2.3944 | 850 | 36.2718 |
69
- | 29.5145 | 2.5352 | 900 | 35.7341 |
70
- | 28.6636 | 2.6761 | 950 | 36.8405 |
71
- | 28.9409 | 2.8169 | 1000 | 36.5258 |
72
- | 28.5857 | 2.9577 | 1050 | 35.2844 |
73
- | 27.9033 | 3.0986 | 1100 | 36.2973 |
74
- | 27.3906 | 3.2394 | 1150 | 36.6364 |
75
- | 27.6448 | 3.3803 | 1200 | 36.4092 |
76
- | 27.683 | 3.5211 | 1250 | 36.8996 |
77
 
78
 
79
  ### Framework versions
 
16
 
17
  This model is a fine-tuned version of [facebook/mask2former-swin-tiny-ade-semantic](https://huggingface.co/facebook/mask2former-swin-tiny-ade-semantic) on an unknown dataset.
18
  It achieves the following results on the evaluation set:
19
+ - Loss: 29.1112
20
 
21
  ## Model description
22
 
 
43
  - lr_scheduler_type: linear
44
  - lr_scheduler_warmup_steps: 4
45
  - num_epochs: 100
 
46
 
47
  ### Training results
48
 
49
  | Training Loss | Epoch | Step | Validation Loss |
50
  |:-------------:|:------:|:----:|:---------------:|
51
+ | 50.7018 | 0.1408 | 50 | 44.2435 |
52
+ | 40.5877 | 0.2817 | 100 | 39.6465 |
53
+ | 37.4102 | 0.4225 | 150 | 37.2471 |
54
+ | 35.7502 | 0.5634 | 200 | 36.3455 |
55
+ | 34.7067 | 0.7042 | 250 | 34.8824 |
56
+ | 34.0798 | 0.8451 | 300 | 34.8520 |
57
+ | 33.3503 | 0.9859 | 350 | 33.7321 |
58
+ | 32.3436 | 1.1268 | 400 | 33.1560 |
59
+ | 32.3845 | 1.2676 | 450 | 33.0411 |
60
+ | 30.8809 | 1.4085 | 500 | 32.7852 |
61
+ | 31.689 | 1.5493 | 550 | 31.9914 |
62
+ | 31.036 | 1.6901 | 600 | 32.7297 |
63
+ | 30.9795 | 1.8310 | 650 | 31.8848 |
64
+ | 30.7918 | 1.9718 | 700 | 31.5285 |
65
+ | 30.1432 | 2.1127 | 750 | 32.0634 |
66
+ | 29.7082 | 2.2535 | 800 | 31.1849 |
67
+ | 28.7869 | 2.3944 | 850 | 30.9022 |
68
+ | 29.4227 | 2.5352 | 900 | 30.5902 |
69
+ | 29.1865 | 2.6761 | 950 | 30.3818 |
70
+ | 29.2715 | 2.8169 | 1000 | 30.9196 |
71
+ | 29.1941 | 2.9577 | 1050 | 30.8163 |
72
+ | 28.5256 | 3.0986 | 1100 | 30.4730 |
73
+ | 28.0419 | 3.2394 | 1150 | 30.6531 |
74
+ | 28.0538 | 3.3803 | 1200 | 30.0779 |
75
+ | 27.9463 | 3.5211 | 1250 | 30.6114 |
76
+ | 27.4152 | 3.6620 | 1300 | 30.5519 |
77
+ | 27.7461 | 3.8028 | 1350 | 29.5641 |
78
+ | 27.5604 | 3.9437 | 1400 | 30.1296 |
79
+ | 27.381 | 4.0845 | 1450 | 30.5017 |
80
+ | 26.3816 | 4.2254 | 1500 | 29.6898 |
81
+ | 26.5218 | 4.3662 | 1550 | 29.9475 |
82
+ | 26.9798 | 4.5070 | 1600 | 29.3323 |
83
+ | 26.8186 | 4.6479 | 1650 | 29.5755 |
84
+ | 27.5111 | 4.7887 | 1700 | 30.7945 |
85
+ | 27.0839 | 4.9296 | 1750 | 29.4147 |
86
+ | 26.6393 | 5.0704 | 1800 | 28.7983 |
87
+ | 26.3564 | 5.2113 | 1850 | 29.2245 |
88
+ | 25.6174 | 5.3521 | 1900 | 28.9337 |
89
+ | 25.8777 | 5.4930 | 1950 | 29.4778 |
90
+ | 25.6848 | 5.6338 | 2000 | 28.4992 |
91
+ | 26.4625 | 5.7746 | 2050 | 29.6182 |
92
+ | 26.8448 | 5.9155 | 2100 | 29.5377 |
93
+ | 26.0681 | 6.0563 | 2150 | 29.2390 |
94
+ | 25.628 | 6.1972 | 2200 | 29.1112 |
95
 
96
 
97
  ### Framework versions
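
The README lists the fine-tuning hyperparameters (linear LR schedule, 4 warmup steps, 100 epochs), and trainer_state.json below records the batch size, eval/logging interval, and checkpoint path. A minimal sketch of how such a run could be wired up with the 🤗 `TrainingArguments` follows; this is an illustration, not the authors' script, and the learning rate, save interval, and anything else not stated in the card are assumptions.

```python
from transformers import TrainingArguments

# Sketch of TrainingArguments consistent with the card and trainer_state.json.
# Assumed values are marked; the dataset and training script are not part of this commit.
args = TrainingArguments(
    output_dir="mask2former",            # matches best_model_checkpoint "mask2former/checkpoint-..."
    learning_rate=5e-5,                  # assumed; the logged schedule starts near 4.99e-5
    lr_scheduler_type="linear",          # from the card
    warmup_steps=4,                      # from the card
    num_train_epochs=100,                # from the card
    per_device_train_batch_size=8,       # "train_batch_size": 8 in trainer_state.json
    eval_strategy="steps",               # "evaluation_strategy" on older transformers releases
    eval_steps=50,                       # "eval_steps": 50 in trainer_state.json
    logging_steps=50,                    # "logging_steps": 50 in trainer_state.json
    save_steps=50,                       # assumed; checkpoints in trainer_state.json fall on 50-step boundaries
    load_best_model_at_end=True,         # needed for the early stopping recorded in trainer_state.json
    metric_for_best_model="eval_loss",
    greater_is_better=False,
)
```

Passing these arguments to a `Trainer` together with an `EarlyStoppingCallback` (see the sketch after the trainer_state.json diff below) would reproduce the stop-on-plateau behaviour recorded in this commit.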
all_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
- "epoch": 3.52112676056338,
3
- "total_flos": 8.940768232852685e+18,
4
- "train_loss": 32.0865517578125,
5
- "train_runtime": 9282.3659,
6
- "train_samples_per_second": 122.329,
7
- "train_steps_per_second": 3.824
8
  }
 
1
  {
2
+ "epoch": 6.197183098591549,
3
+ "total_flos": 8.850907754333798e+18,
4
+ "train_loss": 29.80798134543679,
5
+ "train_runtime": 17574.1061,
6
+ "train_samples_per_second": 64.612,
7
+ "train_steps_per_second": 2.02
8
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b009193ff8a99ce11f3e6842d3c351f8441fca3ee628de25a99a5518be0202c1
3
  size 190070416
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:14b34ad757d964baed8cb111ffe1387710bae463d2b89500a89e153820b9681b
3
  size 190070416
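
The updated model.safetensors above holds the weights that `from_pretrained` downloads for this repo. A minimal inference sketch for semantic segmentation with the uploaded checkpoint is shown below; the repo id is taken from the commit header and the image URL is an arbitrary example, not part of this commit.

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation

repo_id = "UTEL-UIUC/ft-aug-mask2former-swin-small-ade-semantic"  # repo named in the commit header
processor = AutoImageProcessor.from_pretrained(repo_id)
model = Mask2FormerForUniversalSegmentation.from_pretrained(repo_id)

# Arbitrary example image; replace with your own input.
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Merge the predicted masks into a per-pixel class map at the original resolution.
segmentation = processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]
print(segmentation.shape)  # (height, width) tensor of class ids
```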
train_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
- "epoch": 3.52112676056338,
3
- "total_flos": 8.940768232852685e+18,
4
- "train_loss": 32.0865517578125,
5
- "train_runtime": 9282.3659,
6
- "train_samples_per_second": 122.329,
7
- "train_steps_per_second": 3.824
8
  }
 
1
  {
2
+ "epoch": 6.197183098591549,
3
+ "total_flos": 8.850907754333798e+18,
4
+ "train_loss": 29.80798134543679,
5
+ "train_runtime": 17574.1061,
6
+ "train_samples_per_second": 64.612,
7
+ "train_steps_per_second": 2.02
8
  }
trainer_state.json CHANGED
@@ -1,396 +1,681 @@
1
  {
2
- "best_metric": 35.73408889770508,
3
- "best_model_checkpoint": "mask2former/checkpoint-900",
4
- "epoch": 3.52112676056338,
5
  "eval_steps": 50,
6
- "global_step": 1250,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.14084507042253522,
13
- "grad_norm": 262.4289245605469,
14
- "learning_rate": 4.994365562316881e-05,
15
- "loss": 54.3035,
16
  "step": 50
17
  },
18
  {
19
  "epoch": 0.14084507042253522,
20
- "eval_loss": 48.166358947753906,
21
- "eval_runtime": 112.5704,
22
- "eval_samples_per_second": 11.211,
23
- "eval_steps_per_second": 1.404,
24
  "step": 50
25
  },
26
  {
27
  "epoch": 0.28169014084507044,
28
- "grad_norm": 190.41722106933594,
29
- "learning_rate": 4.987322515212982e-05,
30
- "loss": 41.3859,
31
  "step": 100
32
  },
33
  {
34
  "epoch": 0.28169014084507044,
35
- "eval_loss": 42.71906280517578,
36
- "eval_runtime": 105.2195,
37
- "eval_samples_per_second": 11.994,
38
- "eval_steps_per_second": 1.502,
39
  "step": 100
40
  },
41
  {
42
  "epoch": 0.4225352112676056,
43
- "grad_norm": 284.4668273925781,
44
- "learning_rate": 4.980279468109083e-05,
45
- "loss": 37.7752,
46
  "step": 150
47
  },
48
  {
49
  "epoch": 0.4225352112676056,
50
- "eval_loss": 39.32645797729492,
51
- "eval_runtime": 116.3259,
52
- "eval_samples_per_second": 10.849,
53
- "eval_steps_per_second": 1.358,
54
  "step": 150
55
  },
56
  {
57
  "epoch": 0.5633802816901409,
58
- "grad_norm": 201.9960174560547,
59
- "learning_rate": 4.973236421005184e-05,
60
- "loss": 35.7194,
61
  "step": 200
62
  },
63
  {
64
  "epoch": 0.5633802816901409,
65
- "eval_loss": 37.961944580078125,
66
- "eval_runtime": 106.9871,
67
- "eval_samples_per_second": 11.796,
68
- "eval_steps_per_second": 1.477,
69
  "step": 200
70
  },
71
  {
72
  "epoch": 0.704225352112676,
73
- "grad_norm": 173.2267608642578,
74
- "learning_rate": 4.966193373901285e-05,
75
- "loss": 34.6249,
76
  "step": 250
77
  },
78
  {
79
  "epoch": 0.704225352112676,
80
- "eval_loss": 37.83575439453125,
81
- "eval_runtime": 124.9064,
82
- "eval_samples_per_second": 10.104,
83
- "eval_steps_per_second": 1.265,
84
  "step": 250
85
  },
86
  {
87
  "epoch": 0.8450704225352113,
88
- "grad_norm": 174.05532836914062,
89
- "learning_rate": 4.959150326797386e-05,
90
- "loss": 33.9646,
91
  "step": 300
92
  },
93
  {
94
  "epoch": 0.8450704225352113,
95
- "eval_loss": 37.63886642456055,
96
- "eval_runtime": 111.95,
97
- "eval_samples_per_second": 11.273,
98
- "eval_steps_per_second": 1.411,
99
  "step": 300
100
  },
101
  {
102
  "epoch": 0.9859154929577465,
103
- "grad_norm": 137.86322021484375,
104
- "learning_rate": 4.952107279693487e-05,
105
- "loss": 33.0984,
106
  "step": 350
107
  },
108
  {
109
  "epoch": 0.9859154929577465,
110
- "eval_loss": 36.277099609375,
111
- "eval_runtime": 119.8333,
112
- "eval_samples_per_second": 10.531,
113
- "eval_steps_per_second": 1.318,
114
  "step": 350
115
  },
116
  {
117
  "epoch": 1.1267605633802817,
118
- "grad_norm": 179.84205627441406,
119
- "learning_rate": 4.945064232589588e-05,
120
- "loss": 32.1659,
121
  "step": 400
122
  },
123
  {
124
  "epoch": 1.1267605633802817,
125
- "eval_loss": 36.488914489746094,
126
- "eval_runtime": 109.4208,
127
- "eval_samples_per_second": 11.533,
128
- "eval_steps_per_second": 1.444,
129
  "step": 400
130
  },
131
  {
132
  "epoch": 1.267605633802817,
133
- "grad_norm": 255.6558074951172,
134
- "learning_rate": 4.938021185485689e-05,
135
- "loss": 32.1228,
136
  "step": 450
137
  },
138
  {
139
  "epoch": 1.267605633802817,
140
- "eval_loss": 36.73270034790039,
141
- "eval_runtime": 129.1375,
142
- "eval_samples_per_second": 9.773,
143
- "eval_steps_per_second": 1.224,
144
  "step": 450
145
  },
146
  {
147
  "epoch": 1.408450704225352,
148
- "grad_norm": 160.42005920410156,
149
- "learning_rate": 4.93097813838179e-05,
150
- "loss": 30.6904,
151
  "step": 500
152
  },
153
  {
154
  "epoch": 1.408450704225352,
155
- "eval_loss": 36.44846725463867,
156
- "eval_runtime": 109.3219,
157
- "eval_samples_per_second": 11.544,
158
- "eval_steps_per_second": 1.445,
159
  "step": 500
160
  },
161
  {
162
  "epoch": 1.5492957746478875,
163
- "grad_norm": 221.75514221191406,
164
- "learning_rate": 4.923935091277891e-05,
165
- "loss": 31.1939,
166
  "step": 550
167
  },
168
  {
169
  "epoch": 1.5492957746478875,
170
- "eval_loss": 36.818477630615234,
171
- "eval_runtime": 114.8185,
172
- "eval_samples_per_second": 10.991,
173
- "eval_steps_per_second": 1.376,
174
  "step": 550
175
  },
176
  {
177
  "epoch": 1.6901408450704225,
178
- "grad_norm": 186.95262145996094,
179
- "learning_rate": 4.916892044173992e-05,
180
- "loss": 30.6071,
181
  "step": 600
182
  },
183
  {
184
  "epoch": 1.6901408450704225,
185
- "eval_loss": 38.739051818847656,
186
- "eval_runtime": 107.1992,
187
- "eval_samples_per_second": 11.772,
188
- "eval_steps_per_second": 1.474,
189
  "step": 600
190
  },
191
  {
192
  "epoch": 1.8309859154929577,
193
- "grad_norm": 162.2119140625,
194
- "learning_rate": 4.909848997070093e-05,
195
- "loss": 30.6755,
196
  "step": 650
197
  },
198
  {
199
  "epoch": 1.8309859154929577,
200
- "eval_loss": 36.8563117980957,
201
- "eval_runtime": 112.923,
202
- "eval_samples_per_second": 11.176,
203
- "eval_steps_per_second": 1.399,
204
  "step": 650
205
  },
206
  {
207
  "epoch": 1.971830985915493,
208
- "grad_norm": 149.2563018798828,
209
- "learning_rate": 4.902805949966194e-05,
210
- "loss": 30.2044,
211
  "step": 700
212
  },
213
  {
214
  "epoch": 1.971830985915493,
215
- "eval_loss": 36.031089782714844,
216
- "eval_runtime": 123.7858,
217
- "eval_samples_per_second": 10.195,
218
- "eval_steps_per_second": 1.276,
219
  "step": 700
220
  },
221
  {
222
  "epoch": 2.112676056338028,
223
- "grad_norm": 151.58270263671875,
224
- "learning_rate": 4.895762902862295e-05,
225
- "loss": 29.5483,
226
  "step": 750
227
  },
228
  {
229
  "epoch": 2.112676056338028,
230
- "eval_loss": 36.10577392578125,
231
- "eval_runtime": 113.9993,
232
- "eval_samples_per_second": 11.07,
233
- "eval_steps_per_second": 1.386,
234
  "step": 750
235
  },
236
  {
237
  "epoch": 2.2535211267605635,
238
- "grad_norm": 151.85011291503906,
239
- "learning_rate": 4.888719855758396e-05,
240
- "loss": 29.2086,
241
  "step": 800
242
  },
243
  {
244
  "epoch": 2.2535211267605635,
245
- "eval_loss": 36.92595291137695,
246
- "eval_runtime": 140.6123,
247
- "eval_samples_per_second": 8.975,
248
- "eval_steps_per_second": 1.124,
249
  "step": 800
250
  },
251
  {
252
  "epoch": 2.3943661971830985,
253
- "grad_norm": 142.75364685058594,
254
- "learning_rate": 4.881676808654497e-05,
255
- "loss": 28.5485,
256
  "step": 850
257
  },
258
  {
259
  "epoch": 2.3943661971830985,
260
- "eval_loss": 36.27175521850586,
261
- "eval_runtime": 126.2759,
262
- "eval_samples_per_second": 9.994,
263
- "eval_steps_per_second": 1.251,
264
  "step": 850
265
  },
266
  {
267
  "epoch": 2.535211267605634,
268
- "grad_norm": 143.0653839111328,
269
- "learning_rate": 4.874633761550598e-05,
270
- "loss": 29.5145,
271
  "step": 900
272
  },
273
  {
274
  "epoch": 2.535211267605634,
275
- "eval_loss": 35.73408889770508,
276
- "eval_runtime": 116.5706,
277
- "eval_samples_per_second": 10.826,
278
- "eval_steps_per_second": 1.355,
279
  "step": 900
280
  },
281
  {
282
  "epoch": 2.676056338028169,
283
- "grad_norm": 190.54563903808594,
284
- "learning_rate": 4.8675907144466983e-05,
285
- "loss": 28.6636,
286
  "step": 950
287
  },
288
  {
289
  "epoch": 2.676056338028169,
290
- "eval_loss": 36.84046173095703,
291
- "eval_runtime": 121.868,
292
- "eval_samples_per_second": 10.355,
293
- "eval_steps_per_second": 1.296,
294
  "step": 950
295
  },
296
  {
297
  "epoch": 2.816901408450704,
298
- "grad_norm": 131.4083251953125,
299
- "learning_rate": 4.8605476673428e-05,
300
- "loss": 28.9409,
301
  "step": 1000
302
  },
303
  {
304
  "epoch": 2.816901408450704,
305
- "eval_loss": 36.52579879760742,
306
- "eval_runtime": 109.1795,
307
- "eval_samples_per_second": 11.559,
308
- "eval_steps_per_second": 1.447,
309
  "step": 1000
310
  },
311
  {
312
  "epoch": 2.9577464788732395,
313
- "grad_norm": 115.78204345703125,
314
- "learning_rate": 4.8535046202389e-05,
315
- "loss": 28.5857,
316
  "step": 1050
317
  },
318
  {
319
  "epoch": 2.9577464788732395,
320
- "eval_loss": 35.284393310546875,
321
- "eval_runtime": 112.4213,
322
- "eval_samples_per_second": 11.226,
323
- "eval_steps_per_second": 1.405,
324
  "step": 1050
325
  },
326
  {
327
  "epoch": 3.0985915492957745,
328
- "grad_norm": 112.22978973388672,
329
- "learning_rate": 4.8464615731350017e-05,
330
- "loss": 27.9033,
331
  "step": 1100
332
  },
333
  {
334
  "epoch": 3.0985915492957745,
335
- "eval_loss": 36.297340393066406,
336
- "eval_runtime": 131.1398,
337
- "eval_samples_per_second": 9.623,
338
- "eval_steps_per_second": 1.205,
339
  "step": 1100
340
  },
341
  {
342
  "epoch": 3.23943661971831,
343
- "grad_norm": 155.36874389648438,
344
- "learning_rate": 4.839418526031102e-05,
345
- "loss": 27.3906,
346
  "step": 1150
347
  },
348
  {
349
  "epoch": 3.23943661971831,
350
- "eval_loss": 36.63642120361328,
351
- "eval_runtime": 100.2617,
352
- "eval_samples_per_second": 12.587,
353
- "eval_steps_per_second": 1.576,
354
  "step": 1150
355
  },
356
  {
357
  "epoch": 3.380281690140845,
358
- "grad_norm": 126.88693237304688,
359
- "learning_rate": 4.8323754789272036e-05,
360
- "loss": 27.6448,
361
  "step": 1200
362
  },
363
  {
364
  "epoch": 3.380281690140845,
365
- "eval_loss": 36.409244537353516,
366
- "eval_runtime": 112.5838,
367
- "eval_samples_per_second": 11.209,
368
- "eval_steps_per_second": 1.403,
369
  "step": 1200
370
  },
371
  {
372
  "epoch": 3.52112676056338,
373
- "grad_norm": 101.8378677368164,
374
- "learning_rate": 4.825332431823304e-05,
375
- "loss": 27.683,
376
  "step": 1250
377
  },
378
  {
379
  "epoch": 3.52112676056338,
380
- "eval_loss": 36.8996467590332,
381
- "eval_runtime": 119.5158,
382
- "eval_samples_per_second": 10.559,
383
- "eval_steps_per_second": 1.322,
384
  "step": 1250
385
  },
386
  {
387
- "epoch": 3.52112676056338,
388
- "step": 1250,
389
- "total_flos": 8.940768232852685e+18,
390
- "train_loss": 32.0865517578125,
391
- "train_runtime": 9282.3659,
392
- "train_samples_per_second": 122.329,
393
- "train_steps_per_second": 3.824
394
  }
395
  ],
396
  "logging_steps": 50,
@@ -405,7 +690,7 @@
405
  "early_stopping_threshold": 0.0
406
  },
407
  "attributes": {
408
- "early_stopping_patience_counter": 3
409
  }
410
  },
411
  "TrainerControl": {
@@ -414,12 +699,12 @@
414
  "should_evaluate": false,
415
  "should_log": false,
416
  "should_save": true,
417
- "should_training_stop": false
418
  },
419
  "attributes": {}
420
  }
421
  },
422
- "total_flos": 8.940768232852685e+18,
423
  "train_batch_size": 8,
424
  "trial_name": null,
425
  "trial_params": null
 
1
  {
2
+ "best_metric": 28.4991512298584,
3
+ "best_model_checkpoint": "mask2former/checkpoint-2000",
4
+ "epoch": 6.197183098591549,
5
  "eval_steps": 50,
6
+ "global_step": 2200,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.14084507042253522,
13
+ "grad_norm": 227.8921661376953,
14
+ "learning_rate": 4.9935203966644134e-05,
15
+ "loss": 50.7018,
16
  "step": 50
17
  },
18
  {
19
  "epoch": 0.14084507042253522,
20
+ "eval_loss": 44.24353790283203,
21
+ "eval_runtime": 135.6148,
22
+ "eval_samples_per_second": 9.306,
23
+ "eval_steps_per_second": 1.165,
24
  "step": 50
25
  },
26
  {
27
  "epoch": 0.28169014084507044,
28
+ "grad_norm": 232.9764404296875,
29
+ "learning_rate": 4.986477349560514e-05,
30
+ "loss": 40.5877,
31
  "step": 100
32
  },
33
  {
34
  "epoch": 0.28169014084507044,
35
+ "eval_loss": 39.646488189697266,
36
+ "eval_runtime": 119.1324,
37
+ "eval_samples_per_second": 10.593,
38
+ "eval_steps_per_second": 1.326,
39
  "step": 100
40
  },
41
  {
42
  "epoch": 0.4225352112676056,
43
+ "grad_norm": 375.6308898925781,
44
+ "learning_rate": 4.979434302456615e-05,
45
+ "loss": 37.4102,
46
  "step": 150
47
  },
48
  {
49
  "epoch": 0.4225352112676056,
50
+ "eval_loss": 37.247108459472656,
51
+ "eval_runtime": 119.6132,
52
+ "eval_samples_per_second": 10.551,
53
+ "eval_steps_per_second": 1.321,
54
  "step": 150
55
  },
56
  {
57
  "epoch": 0.5633802816901409,
58
+ "grad_norm": 189.58392333984375,
59
+ "learning_rate": 4.972391255352716e-05,
60
+ "loss": 35.7502,
61
  "step": 200
62
  },
63
  {
64
  "epoch": 0.5633802816901409,
65
+ "eval_loss": 36.34551239013672,
66
+ "eval_runtime": 132.8881,
67
+ "eval_samples_per_second": 9.497,
68
+ "eval_steps_per_second": 1.189,
69
  "step": 200
70
  },
71
  {
72
  "epoch": 0.704225352112676,
73
+ "grad_norm": 163.74330139160156,
74
+ "learning_rate": 4.965348208248817e-05,
75
+ "loss": 34.7067,
76
  "step": 250
77
  },
78
  {
79
  "epoch": 0.704225352112676,
80
+ "eval_loss": 34.88238525390625,
81
+ "eval_runtime": 120.2665,
82
+ "eval_samples_per_second": 10.493,
83
+ "eval_steps_per_second": 1.314,
84
  "step": 250
85
  },
86
  {
87
  "epoch": 0.8450704225352113,
88
+ "grad_norm": 173.85494995117188,
89
+ "learning_rate": 4.958305161144918e-05,
90
+ "loss": 34.0798,
91
  "step": 300
92
  },
93
  {
94
  "epoch": 0.8450704225352113,
95
+ "eval_loss": 34.85204315185547,
96
+ "eval_runtime": 129.3353,
97
+ "eval_samples_per_second": 9.758,
98
+ "eval_steps_per_second": 1.222,
99
  "step": 300
100
  },
101
  {
102
  "epoch": 0.9859154929577465,
103
+ "grad_norm": 167.4436492919922,
104
+ "learning_rate": 4.951262114041019e-05,
105
+ "loss": 33.3503,
106
  "step": 350
107
  },
108
  {
109
  "epoch": 0.9859154929577465,
110
+ "eval_loss": 33.73210144042969,
111
+ "eval_runtime": 143.3502,
112
+ "eval_samples_per_second": 8.804,
113
+ "eval_steps_per_second": 1.102,
114
  "step": 350
115
  },
116
  {
117
  "epoch": 1.1267605633802817,
118
+ "grad_norm": 202.66012573242188,
119
+ "learning_rate": 4.944219066937119e-05,
120
+ "loss": 32.3436,
121
  "step": 400
122
  },
123
  {
124
  "epoch": 1.1267605633802817,
125
+ "eval_loss": 33.15604782104492,
126
+ "eval_runtime": 123.8826,
127
+ "eval_samples_per_second": 10.187,
128
+ "eval_steps_per_second": 1.275,
129
  "step": 400
130
  },
131
  {
132
  "epoch": 1.267605633802817,
133
+ "grad_norm": 145.71310424804688,
134
+ "learning_rate": 4.937176019833221e-05,
135
+ "loss": 32.3845,
136
  "step": 450
137
  },
138
  {
139
  "epoch": 1.267605633802817,
140
+ "eval_loss": 33.041107177734375,
141
+ "eval_runtime": 125.7421,
142
+ "eval_samples_per_second": 10.036,
143
+ "eval_steps_per_second": 1.257,
144
  "step": 450
145
  },
146
  {
147
  "epoch": 1.408450704225352,
148
+ "grad_norm": 143.16452026367188,
149
+ "learning_rate": 4.930132972729321e-05,
150
+ "loss": 30.8809,
151
  "step": 500
152
  },
153
  {
154
  "epoch": 1.408450704225352,
155
+ "eval_loss": 32.785240173339844,
156
+ "eval_runtime": 137.1907,
157
+ "eval_samples_per_second": 9.199,
158
+ "eval_steps_per_second": 1.152,
159
  "step": 500
160
  },
161
  {
162
  "epoch": 1.5492957746478875,
163
+ "grad_norm": 159.82777404785156,
164
+ "learning_rate": 4.9230899256254227e-05,
165
+ "loss": 31.689,
166
  "step": 550
167
  },
168
  {
169
  "epoch": 1.5492957746478875,
170
+ "eval_loss": 31.99137306213379,
171
+ "eval_runtime": 129.225,
172
+ "eval_samples_per_second": 9.766,
173
+ "eval_steps_per_second": 1.223,
174
  "step": 550
175
  },
176
  {
177
  "epoch": 1.6901408450704225,
178
+ "grad_norm": 160.78164672851562,
179
+ "learning_rate": 4.916046878521523e-05,
180
+ "loss": 31.036,
181
  "step": 600
182
  },
183
  {
184
  "epoch": 1.6901408450704225,
185
+ "eval_loss": 32.72974395751953,
186
+ "eval_runtime": 129.162,
187
+ "eval_samples_per_second": 9.771,
188
+ "eval_steps_per_second": 1.223,
189
  "step": 600
190
  },
191
  {
192
  "epoch": 1.8309859154929577,
193
+ "grad_norm": 206.84974670410156,
194
+ "learning_rate": 4.9090038314176246e-05,
195
+ "loss": 30.9795,
196
  "step": 650
197
  },
198
  {
199
  "epoch": 1.8309859154929577,
200
+ "eval_loss": 31.88483238220215,
201
+ "eval_runtime": 126.8688,
202
+ "eval_samples_per_second": 9.947,
203
+ "eval_steps_per_second": 1.245,
204
  "step": 650
205
  },
206
  {
207
  "epoch": 1.971830985915493,
208
+ "grad_norm": 128.7499237060547,
209
+ "learning_rate": 4.901960784313725e-05,
210
+ "loss": 30.7918,
211
  "step": 700
212
  },
213
  {
214
  "epoch": 1.971830985915493,
215
+ "eval_loss": 31.528514862060547,
216
+ "eval_runtime": 131.1893,
217
+ "eval_samples_per_second": 9.62,
218
+ "eval_steps_per_second": 1.204,
219
  "step": 700
220
  },
221
  {
222
  "epoch": 2.112676056338028,
223
+ "grad_norm": 155.95223999023438,
224
+ "learning_rate": 4.8949177372098266e-05,
225
+ "loss": 30.1432,
226
  "step": 750
227
  },
228
  {
229
  "epoch": 2.112676056338028,
230
+ "eval_loss": 32.06336212158203,
231
+ "eval_runtime": 130.1706,
232
+ "eval_samples_per_second": 9.695,
233
+ "eval_steps_per_second": 1.214,
234
  "step": 750
235
  },
236
  {
237
  "epoch": 2.2535211267605635,
238
+ "grad_norm": 122.61072540283203,
239
+ "learning_rate": 4.887874690105927e-05,
240
+ "loss": 29.7082,
241
  "step": 800
242
  },
243
  {
244
  "epoch": 2.2535211267605635,
245
+ "eval_loss": 31.184894561767578,
246
+ "eval_runtime": 121.0117,
247
+ "eval_samples_per_second": 10.429,
248
+ "eval_steps_per_second": 1.306,
249
  "step": 800
250
  },
251
  {
252
  "epoch": 2.3943661971830985,
253
+ "grad_norm": 139.07225036621094,
254
+ "learning_rate": 4.8808316430020286e-05,
255
+ "loss": 28.7869,
256
  "step": 850
257
  },
258
  {
259
  "epoch": 2.3943661971830985,
260
+ "eval_loss": 30.902196884155273,
261
+ "eval_runtime": 127.2634,
262
+ "eval_samples_per_second": 9.916,
263
+ "eval_steps_per_second": 1.242,
264
  "step": 850
265
  },
266
  {
267
  "epoch": 2.535211267605634,
268
+ "grad_norm": 188.21234130859375,
269
+ "learning_rate": 4.873788595898129e-05,
270
+ "loss": 29.4227,
271
  "step": 900
272
  },
273
  {
274
  "epoch": 2.535211267605634,
275
+ "eval_loss": 30.5902099609375,
276
+ "eval_runtime": 146.3048,
277
+ "eval_samples_per_second": 8.626,
278
+ "eval_steps_per_second": 1.08,
279
  "step": 900
280
  },
281
  {
282
  "epoch": 2.676056338028169,
283
+ "grad_norm": 302.049560546875,
284
+ "learning_rate": 4.8667455487942306e-05,
285
+ "loss": 29.1865,
286
  "step": 950
287
  },
288
  {
289
  "epoch": 2.676056338028169,
290
+ "eval_loss": 30.381799697875977,
291
+ "eval_runtime": 118.3226,
292
+ "eval_samples_per_second": 10.666,
293
+ "eval_steps_per_second": 1.335,
294
  "step": 950
295
  },
296
  {
297
  "epoch": 2.816901408450704,
298
+ "grad_norm": 151.5100860595703,
299
+ "learning_rate": 4.859702501690331e-05,
300
+ "loss": 29.2715,
301
  "step": 1000
302
  },
303
  {
304
  "epoch": 2.816901408450704,
305
+ "eval_loss": 30.919567108154297,
306
+ "eval_runtime": 128.7879,
307
+ "eval_samples_per_second": 9.799,
308
+ "eval_steps_per_second": 1.227,
309
  "step": 1000
310
  },
311
  {
312
  "epoch": 2.9577464788732395,
313
+ "grad_norm": 135.14910888671875,
314
+ "learning_rate": 4.8526594545864326e-05,
315
+ "loss": 29.1941,
316
  "step": 1050
317
  },
318
  {
319
  "epoch": 2.9577464788732395,
320
+ "eval_loss": 30.816268920898438,
321
+ "eval_runtime": 127.4007,
322
+ "eval_samples_per_second": 9.906,
323
+ "eval_steps_per_second": 1.24,
324
  "step": 1050
325
  },
326
  {
327
  "epoch": 3.0985915492957745,
328
+ "grad_norm": 121.41586303710938,
329
+ "learning_rate": 4.845616407482533e-05,
330
+ "loss": 28.5256,
331
  "step": 1100
332
  },
333
  {
334
  "epoch": 3.0985915492957745,
335
+ "eval_loss": 30.472957611083984,
336
+ "eval_runtime": 144.0632,
337
+ "eval_samples_per_second": 8.76,
338
+ "eval_steps_per_second": 1.097,
339
  "step": 1100
340
  },
341
  {
342
  "epoch": 3.23943661971831,
343
+ "grad_norm": 217.75247192382812,
344
+ "learning_rate": 4.8385733603786346e-05,
345
+ "loss": 28.0419,
346
  "step": 1150
347
  },
348
  {
349
  "epoch": 3.23943661971831,
350
+ "eval_loss": 30.653095245361328,
351
+ "eval_runtime": 129.0443,
352
+ "eval_samples_per_second": 9.78,
353
+ "eval_steps_per_second": 1.224,
354
  "step": 1150
355
  },
356
  {
357
  "epoch": 3.380281690140845,
358
+ "grad_norm": 129.21693420410156,
359
+ "learning_rate": 4.831530313274735e-05,
360
+ "loss": 28.0538,
361
  "step": 1200
362
  },
363
  {
364
  "epoch": 3.380281690140845,
365
+ "eval_loss": 30.077850341796875,
366
+ "eval_runtime": 119.4037,
367
+ "eval_samples_per_second": 10.569,
368
+ "eval_steps_per_second": 1.323,
369
  "step": 1200
370
  },
371
  {
372
  "epoch": 3.52112676056338,
373
+ "grad_norm": 116.27620697021484,
374
+ "learning_rate": 4.8244872661708365e-05,
375
+ "loss": 27.9463,
376
  "step": 1250
377
  },
378
  {
379
  "epoch": 3.52112676056338,
380
+ "eval_loss": 30.61139488220215,
381
+ "eval_runtime": 132.5429,
382
+ "eval_samples_per_second": 9.521,
383
+ "eval_steps_per_second": 1.192,
384
  "step": 1250
385
  },
386
  {
387
+ "epoch": 3.6619718309859155,
388
+ "grad_norm": 149.399169921875,
389
+ "learning_rate": 4.817444219066937e-05,
390
+ "loss": 27.4152,
391
+ "step": 1300
392
+ },
393
+ {
394
+ "epoch": 3.6619718309859155,
395
+ "eval_loss": 30.551870346069336,
396
+ "eval_runtime": 130.4003,
397
+ "eval_samples_per_second": 9.678,
398
+ "eval_steps_per_second": 1.212,
399
+ "step": 1300
400
+ },
401
+ {
402
+ "epoch": 3.802816901408451,
403
+ "grad_norm": 175.60769653320312,
404
+ "learning_rate": 4.8104011719630385e-05,
405
+ "loss": 27.7461,
406
+ "step": 1350
407
+ },
408
+ {
409
+ "epoch": 3.802816901408451,
410
+ "eval_loss": 29.564067840576172,
411
+ "eval_runtime": 131.8055,
412
+ "eval_samples_per_second": 9.575,
413
+ "eval_steps_per_second": 1.199,
414
+ "step": 1350
415
+ },
416
+ {
417
+ "epoch": 3.943661971830986,
418
+ "grad_norm": 114.43487548828125,
419
+ "learning_rate": 4.803358124859139e-05,
420
+ "loss": 27.5604,
421
+ "step": 1400
422
+ },
423
+ {
424
+ "epoch": 3.943661971830986,
425
+ "eval_loss": 30.12961769104004,
426
+ "eval_runtime": 148.0714,
427
+ "eval_samples_per_second": 8.523,
428
+ "eval_steps_per_second": 1.067,
429
+ "step": 1400
430
+ },
431
+ {
432
+ "epoch": 4.084507042253521,
433
+ "grad_norm": 189.4749298095703,
434
+ "learning_rate": 4.79631507775524e-05,
435
+ "loss": 27.381,
436
+ "step": 1450
437
+ },
438
+ {
439
+ "epoch": 4.084507042253521,
440
+ "eval_loss": 30.50173568725586,
441
+ "eval_runtime": 124.4448,
442
+ "eval_samples_per_second": 10.141,
443
+ "eval_steps_per_second": 1.27,
444
+ "step": 1450
445
+ },
446
+ {
447
+ "epoch": 4.225352112676056,
448
+ "grad_norm": 129.88868713378906,
449
+ "learning_rate": 4.789272030651341e-05,
450
+ "loss": 26.3816,
451
+ "step": 1500
452
+ },
453
+ {
454
+ "epoch": 4.225352112676056,
455
+ "eval_loss": 29.6898193359375,
456
+ "eval_runtime": 129.216,
457
+ "eval_samples_per_second": 9.767,
458
+ "eval_steps_per_second": 1.223,
459
+ "step": 1500
460
+ },
461
+ {
462
+ "epoch": 4.366197183098592,
463
+ "grad_norm": 138.11952209472656,
464
+ "learning_rate": 4.782228983547442e-05,
465
+ "loss": 26.5218,
466
+ "step": 1550
467
+ },
468
+ {
469
+ "epoch": 4.366197183098592,
470
+ "eval_loss": 29.94746971130371,
471
+ "eval_runtime": 132.27,
472
+ "eval_samples_per_second": 9.541,
473
+ "eval_steps_per_second": 1.195,
474
+ "step": 1550
475
+ },
476
+ {
477
+ "epoch": 4.507042253521127,
478
+ "grad_norm": 185.40530395507812,
479
+ "learning_rate": 4.775185936443543e-05,
480
+ "loss": 26.9798,
481
+ "step": 1600
482
+ },
483
+ {
484
+ "epoch": 4.507042253521127,
485
+ "eval_loss": 29.332275390625,
486
+ "eval_runtime": 137.9492,
487
+ "eval_samples_per_second": 9.148,
488
+ "eval_steps_per_second": 1.145,
489
+ "step": 1600
490
+ },
491
+ {
492
+ "epoch": 4.647887323943662,
493
+ "grad_norm": 135.86349487304688,
494
+ "learning_rate": 4.768142889339644e-05,
495
+ "loss": 26.8186,
496
+ "step": 1650
497
+ },
498
+ {
499
+ "epoch": 4.647887323943662,
500
+ "eval_loss": 29.575531005859375,
501
+ "eval_runtime": 135.1713,
502
+ "eval_samples_per_second": 9.336,
503
+ "eval_steps_per_second": 1.169,
504
+ "step": 1650
505
+ },
506
+ {
507
+ "epoch": 4.788732394366197,
508
+ "grad_norm": 153.70196533203125,
509
+ "learning_rate": 4.761099842235745e-05,
510
+ "loss": 27.5111,
511
+ "step": 1700
512
+ },
513
+ {
514
+ "epoch": 4.788732394366197,
515
+ "eval_loss": 30.7945499420166,
516
+ "eval_runtime": 117.37,
517
+ "eval_samples_per_second": 10.752,
518
+ "eval_steps_per_second": 1.346,
519
+ "step": 1700
520
+ },
521
+ {
522
+ "epoch": 4.929577464788732,
523
+ "grad_norm": 150.87384033203125,
524
+ "learning_rate": 4.754056795131846e-05,
525
+ "loss": 27.0839,
526
+ "step": 1750
527
+ },
528
+ {
529
+ "epoch": 4.929577464788732,
530
+ "eval_loss": 29.414661407470703,
531
+ "eval_runtime": 120.7787,
532
+ "eval_samples_per_second": 10.449,
533
+ "eval_steps_per_second": 1.308,
534
+ "step": 1750
535
+ },
536
+ {
537
+ "epoch": 5.070422535211268,
538
+ "grad_norm": 140.32867431640625,
539
+ "learning_rate": 4.747013748027947e-05,
540
+ "loss": 26.6393,
541
+ "step": 1800
542
+ },
543
+ {
544
+ "epoch": 5.070422535211268,
545
+ "eval_loss": 28.79827880859375,
546
+ "eval_runtime": 126.6565,
547
+ "eval_samples_per_second": 9.964,
548
+ "eval_steps_per_second": 1.247,
549
+ "step": 1800
550
+ },
551
+ {
552
+ "epoch": 5.211267605633803,
553
+ "grad_norm": 105.2396469116211,
554
+ "learning_rate": 4.739970700924048e-05,
555
+ "loss": 26.3564,
556
+ "step": 1850
557
+ },
558
+ {
559
+ "epoch": 5.211267605633803,
560
+ "eval_loss": 29.22454261779785,
561
+ "eval_runtime": 138.64,
562
+ "eval_samples_per_second": 9.103,
563
+ "eval_steps_per_second": 1.14,
564
+ "step": 1850
565
+ },
566
+ {
567
+ "epoch": 5.352112676056338,
568
+ "grad_norm": 158.32907104492188,
569
+ "learning_rate": 4.732927653820149e-05,
570
+ "loss": 25.6174,
571
+ "step": 1900
572
+ },
573
+ {
574
+ "epoch": 5.352112676056338,
575
+ "eval_loss": 28.933706283569336,
576
+ "eval_runtime": 124.4051,
577
+ "eval_samples_per_second": 10.144,
578
+ "eval_steps_per_second": 1.27,
579
+ "step": 1900
580
+ },
581
+ {
582
+ "epoch": 5.492957746478873,
583
+ "grad_norm": 209.70982360839844,
584
+ "learning_rate": 4.72588460671625e-05,
585
+ "loss": 25.8777,
586
+ "step": 1950
587
+ },
588
+ {
589
+ "epoch": 5.492957746478873,
590
+ "eval_loss": 29.477840423583984,
591
+ "eval_runtime": 138.0785,
592
+ "eval_samples_per_second": 9.14,
593
+ "eval_steps_per_second": 1.144,
594
+ "step": 1950
595
+ },
596
+ {
597
+ "epoch": 5.633802816901408,
598
+ "grad_norm": 172.13571166992188,
599
+ "learning_rate": 4.718841559612351e-05,
600
+ "loss": 25.6848,
601
+ "step": 2000
602
+ },
603
+ {
604
+ "epoch": 5.633802816901408,
605
+ "eval_loss": 28.4991512298584,
606
+ "eval_runtime": 154.121,
607
+ "eval_samples_per_second": 8.188,
608
+ "eval_steps_per_second": 1.025,
609
+ "step": 2000
610
+ },
611
+ {
612
+ "epoch": 5.774647887323944,
613
+ "grad_norm": 133.943115234375,
614
+ "learning_rate": 4.711798512508452e-05,
615
+ "loss": 26.4625,
616
+ "step": 2050
617
+ },
618
+ {
619
+ "epoch": 5.774647887323944,
620
+ "eval_loss": 29.618194580078125,
621
+ "eval_runtime": 118.4654,
622
+ "eval_samples_per_second": 10.653,
623
+ "eval_steps_per_second": 1.334,
624
+ "step": 2050
625
+ },
626
+ {
627
+ "epoch": 5.915492957746479,
628
+ "grad_norm": 143.0037078857422,
629
+ "learning_rate": 4.704755465404553e-05,
630
+ "loss": 26.8448,
631
+ "step": 2100
632
+ },
633
+ {
634
+ "epoch": 5.915492957746479,
635
+ "eval_loss": 29.537738800048828,
636
+ "eval_runtime": 144.9122,
637
+ "eval_samples_per_second": 8.709,
638
+ "eval_steps_per_second": 1.09,
639
+ "step": 2100
640
+ },
641
+ {
642
+ "epoch": 6.056338028169014,
643
+ "grad_norm": 127.10630798339844,
644
+ "learning_rate": 4.697712418300654e-05,
645
+ "loss": 26.0681,
646
+ "step": 2150
647
+ },
648
+ {
649
+ "epoch": 6.056338028169014,
650
+ "eval_loss": 29.239002227783203,
651
+ "eval_runtime": 131.0446,
652
+ "eval_samples_per_second": 9.63,
653
+ "eval_steps_per_second": 1.206,
654
+ "step": 2150
655
+ },
656
+ {
657
+ "epoch": 6.197183098591549,
658
+ "grad_norm": 142.51170349121094,
659
+ "learning_rate": 4.690669371196755e-05,
660
+ "loss": 25.628,
661
+ "step": 2200
662
+ },
663
+ {
664
+ "epoch": 6.197183098591549,
665
+ "eval_loss": 29.1112003326416,
666
+ "eval_runtime": 119.9659,
667
+ "eval_samples_per_second": 10.52,
668
+ "eval_steps_per_second": 1.317,
669
+ "step": 2200
670
+ },
671
+ {
672
+ "epoch": 6.197183098591549,
673
+ "step": 2200,
674
+ "total_flos": 8.850907754333798e+18,
675
+ "train_loss": 29.80798134543679,
676
+ "train_runtime": 17574.1061,
677
+ "train_samples_per_second": 64.612,
678
+ "train_steps_per_second": 2.02
679
  }
680
  ],
681
  "logging_steps": 50,
 
690
  "early_stopping_threshold": 0.0
691
  },
692
  "attributes": {
693
+ "early_stopping_patience_counter": 4
694
  }
695
  },
696
  "TrainerControl": {
 
699
  "should_evaluate": false,
700
  "should_log": false,
701
  "should_save": true,
702
+ "should_training_stop": true
703
  },
704
  "attributes": {}
705
  }
706
  },
707
+ "total_flos": 8.850907754333798e+18,
708
  "train_batch_size": 8,
709
  "trial_name": null,
710
  "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f195d499db88aa6ef99e3261827b3c7b85f2a5b0917267588681b08989f42b9c
3
  size 5240
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e9da334fac8e5bddd30bc32e51ac79bfbfb26f83868f0b31c3f99b8c64a3f17
3
  size 5240