fpadovani committed
Commit a7035c8 · verified · 1 Parent(s): 8922e22

Training in progress, step 52000, checkpoint

checkpoint-52000/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:25b0faf582d0572d3dfbb321f67808f8d82a4e959236ecebaefdc3b1e2eede45
+ oid sha256:910837181f15e609dc1e3f951a61f2b11f6a237fa1dd411662067a588bed49c8
  size 51007160
checkpoint-52000/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d5665feaf91024731ff3c2aeb7c3370021f87110da3354b6ebe8ae84672ecfca
+ oid sha256:309e324ffa95e332b9f155d75c6788530534898141fd101a2291cb39f53a858c
  size 102078202
checkpoint-52000/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dd90f1056ea5372ff9b85691bc69b7a12e7e21ab7d9b345c65889c568e4af974
+ oid sha256:66351ae7246be1c11af7bb2221b3a7ea56597e19668b522d8534d87ab16f0235
  size 14308
checkpoint-52000/tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
checkpoint-52000/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
  {
- "best_metric": 3.9348273277282715,
- "best_model_checkpoint": "/home/p318482/babyLM_controlled/models_trained/fr_clm/childes_30/checkpoint-28000",
- "epoch": 81.76100628930817,
+ "best_metric": 4.583011150360107,
+ "best_model_checkpoint": "/home/p318482/babyLM_controlled/models_trained/de_clm/childes_30/checkpoint-32000",
+ "epoch": 54.507337526205454,
  "eval_steps": 2000,
  "global_step": 52000,
  "is_hyper_param_search": false,
@@ -9,309 +9,309 @@
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 3.1446540880503147,
- "eval_loss": 6.67229700088501,
- "eval_runtime": 0.6568,
- "eval_samples_per_second": 1386.938,
- "eval_steps_per_second": 86.779,
+ "epoch": 2.0964360587002098,
+ "eval_loss": 7.102903366088867,
+ "eval_runtime": 0.9708,
+ "eval_samples_per_second": 1416.286,
+ "eval_steps_per_second": 88.582,
  "step": 2000
  },
  {
- "epoch": 6.289308176100629,
- "grad_norm": 1.4630517959594727,
+ "epoch": 4.1928721174004195,
+ "grad_norm": 1.3964662551879883,
  "learning_rate": 1e-05,
- "loss": 6.5359,
+ "loss": 6.9987,
  "step": 4000
  },
  {
- "epoch": 6.289308176100629,
- "eval_loss": 5.102311611175537,
- "eval_runtime": 0.6383,
- "eval_samples_per_second": 1427.13,
- "eval_steps_per_second": 89.294,
+ "epoch": 4.1928721174004195,
+ "eval_loss": 5.884151935577393,
+ "eval_runtime": 0.966,
+ "eval_samples_per_second": 1423.408,
+ "eval_steps_per_second": 89.028,
  "step": 4000
  },
  {
- "epoch": 9.433962264150944,
- "eval_loss": 4.675341606140137,
- "eval_runtime": 0.6396,
- "eval_samples_per_second": 1424.427,
- "eval_steps_per_second": 89.124,
+ "epoch": 6.289308176100629,
+ "eval_loss": 5.54873514175415,
+ "eval_runtime": 0.9657,
+ "eval_samples_per_second": 1423.84,
+ "eval_steps_per_second": 89.055,
  "step": 6000
  },
  {
- "epoch": 12.578616352201259,
- "grad_norm": 2.3778915405273438,
+ "epoch": 8.385744234800839,
+ "grad_norm": 2.7172107696533203,
  "learning_rate": 2e-05,
- "loss": 4.34,
+ "loss": 5.2204,
  "step": 8000
  },
  {
- "epoch": 12.578616352201259,
- "eval_loss": 4.42986536026001,
- "eval_runtime": 0.646,
- "eval_samples_per_second": 1410.229,
- "eval_steps_per_second": 88.236,
+ "epoch": 8.385744234800839,
+ "eval_loss": 5.2793288230896,
+ "eval_runtime": 0.9644,
+ "eval_samples_per_second": 1425.779,
+ "eval_steps_per_second": 89.176,
  "step": 8000
  },
  {
- "epoch": 15.723270440251572,
- "eval_loss": 4.2789154052734375,
- "eval_runtime": 0.6485,
- "eval_samples_per_second": 1404.856,
- "eval_steps_per_second": 87.9,
+ "epoch": 10.482180293501049,
+ "eval_loss": 5.10486364364624,
+ "eval_runtime": 0.9641,
+ "eval_samples_per_second": 1426.204,
+ "eval_steps_per_second": 89.203,
  "step": 10000
  },
  {
- "epoch": 18.867924528301888,
- "grad_norm": 2.512312889099121,
- "learning_rate": 2.9995e-05,
- "loss": 3.8692,
+ "epoch": 12.578616352201259,
+ "grad_norm": 2.500443458557129,
+ "learning_rate": 2.99925e-05,
+ "loss": 4.7358,
  "step": 12000
  },
  {
- "epoch": 18.867924528301888,
- "eval_loss": 4.1580729484558105,
- "eval_runtime": 0.6403,
- "eval_samples_per_second": 1422.769,
- "eval_steps_per_second": 89.021,
+ "epoch": 12.578616352201259,
+ "eval_loss": 4.983631134033203,
+ "eval_runtime": 0.9644,
+ "eval_samples_per_second": 1425.809,
+ "eval_steps_per_second": 89.178,
  "step": 12000
  },
  {
- "epoch": 22.0125786163522,
- "eval_loss": 4.082061767578125,
- "eval_runtime": 0.6415,
- "eval_samples_per_second": 1420.041,
- "eval_steps_per_second": 88.85,
+ "epoch": 14.675052410901468,
+ "eval_loss": 4.882917404174805,
+ "eval_runtime": 0.9686,
+ "eval_samples_per_second": 1419.612,
+ "eval_steps_per_second": 88.79,
  "step": 14000
  },
  {
- "epoch": 25.157232704402517,
- "grad_norm": 2.4205257892608643,
+ "epoch": 16.771488469601678,
+ "grad_norm": 2.400749444961548,
  "learning_rate": 3.999e-05,
- "loss": 3.5905,
+ "loss": 4.4216,
  "step": 16000
  },
  {
- "epoch": 25.157232704402517,
- "eval_loss": 4.033827781677246,
- "eval_runtime": 0.6416,
- "eval_samples_per_second": 1419.823,
- "eval_steps_per_second": 88.836,
+ "epoch": 16.771488469601678,
+ "eval_loss": 4.802889823913574,
+ "eval_runtime": 0.9763,
+ "eval_samples_per_second": 1408.393,
+ "eval_steps_per_second": 88.089,
  "step": 16000
  },
  {
- "epoch": 28.30188679245283,
- "eval_loss": 3.9935402870178223,
- "eval_runtime": 0.6614,
- "eval_samples_per_second": 1377.343,
- "eval_steps_per_second": 86.178,
+ "epoch": 18.867924528301888,
+ "eval_loss": 4.74226188659668,
+ "eval_runtime": 0.976,
+ "eval_samples_per_second": 1408.882,
+ "eval_steps_per_second": 88.119,
  "step": 18000
  },
  {
- "epoch": 31.446540880503143,
- "grad_norm": 2.5674731731414795,
+ "epoch": 20.964360587002098,
+ "grad_norm": 2.2613022327423096,
  "learning_rate": 4.9985e-05,
- "loss": 3.3778,
+ "loss": 4.1842,
  "step": 20000
  },
  {
- "epoch": 31.446540880503143,
- "eval_loss": 3.9675967693328857,
- "eval_runtime": 0.6406,
- "eval_samples_per_second": 1422.059,
- "eval_steps_per_second": 88.976,
+ "epoch": 20.964360587002098,
+ "eval_loss": 4.690371513366699,
+ "eval_runtime": 0.9669,
+ "eval_samples_per_second": 1422.037,
+ "eval_steps_per_second": 88.942,
  "step": 20000
  },
  {
- "epoch": 34.59119496855346,
- "eval_loss": 3.9494004249572754,
- "eval_runtime": 0.6409,
- "eval_samples_per_second": 1421.391,
- "eval_steps_per_second": 88.934,
+ "epoch": 23.060796645702307,
+ "eval_loss": 4.645771503448486,
+ "eval_runtime": 0.966,
+ "eval_samples_per_second": 1423.404,
+ "eval_steps_per_second": 89.027,
  "step": 22000
  },
  {
- "epoch": 37.735849056603776,
- "grad_norm": 2.273855447769165,
+ "epoch": 25.157232704402517,
+ "grad_norm": 2.2588465213775635,
  "learning_rate": 5.9980000000000005e-05,
- "loss": 3.1983,
+ "loss": 3.9858,
  "step": 24000
  },
  {
- "epoch": 37.735849056603776,
- "eval_loss": 3.9382567405700684,
- "eval_runtime": 0.6423,
- "eval_samples_per_second": 1418.386,
- "eval_steps_per_second": 88.746,
+ "epoch": 25.157232704402517,
+ "eval_loss": 4.623382568359375,
+ "eval_runtime": 0.9666,
+ "eval_samples_per_second": 1422.538,
+ "eval_steps_per_second": 88.973,
  "step": 24000
  },
  {
- "epoch": 40.880503144654085,
- "eval_loss": 3.936068296432495,
- "eval_runtime": 0.6434,
- "eval_samples_per_second": 1415.972,
- "eval_steps_per_second": 88.595,
+ "epoch": 27.253668763102727,
+ "eval_loss": 4.6056084632873535,
+ "eval_runtime": 0.9825,
+ "eval_samples_per_second": 1399.422,
+ "eval_steps_per_second": 87.527,
  "step": 26000
  },
  {
- "epoch": 44.0251572327044,
- "grad_norm": 2.377107858657837,
+ "epoch": 29.350104821802937,
+ "grad_norm": 2.1845760345458984,
  "learning_rate": 6.997500000000001e-05,
- "loss": 3.0374,
+ "loss": 3.8189,
  "step": 28000
  },
  {
- "epoch": 44.0251572327044,
- "eval_loss": 3.9348273277282715,
- "eval_runtime": 0.6383,
- "eval_samples_per_second": 1427.128,
- "eval_steps_per_second": 89.293,
+ "epoch": 29.350104821802937,
+ "eval_loss": 4.590851783752441,
+ "eval_runtime": 1.0367,
+ "eval_samples_per_second": 1326.293,
+ "eval_steps_per_second": 82.954,
  "step": 28000
  },
  {
- "epoch": 47.16981132075472,
- "eval_loss": 3.9625139236450195,
- "eval_runtime": 0.6461,
- "eval_samples_per_second": 1409.936,
- "eval_steps_per_second": 88.218,
+ "epoch": 31.446540880503143,
+ "eval_loss": 4.586838245391846,
+ "eval_runtime": 0.9705,
+ "eval_samples_per_second": 1416.743,
+ "eval_steps_per_second": 88.611,
  "step": 30000
  },
  {
- "epoch": 50.314465408805034,
- "grad_norm": 2.460918426513672,
+ "epoch": 33.542976939203356,
+ "grad_norm": 2.2118289470672607,
  "learning_rate": 7.997e-05,
- "loss": 2.887,
+ "loss": 3.6763,
  "step": 32000
  },
  {
- "epoch": 50.314465408805034,
- "eval_loss": 3.9899542331695557,
- "eval_runtime": 0.6383,
- "eval_samples_per_second": 1427.149,
- "eval_steps_per_second": 89.295,
+ "epoch": 33.542976939203356,
+ "eval_loss": 4.583011150360107,
+ "eval_runtime": 0.9706,
+ "eval_samples_per_second": 1416.604,
+ "eval_steps_per_second": 88.602,
  "step": 32000
  },
  {
- "epoch": 53.459119496855344,
- "eval_loss": 4.022701740264893,
- "eval_runtime": 0.6416,
- "eval_samples_per_second": 1419.9,
- "eval_steps_per_second": 88.841,
+ "epoch": 35.63941299790356,
+ "eval_loss": 4.57816743850708,
+ "eval_runtime": 0.9657,
+ "eval_samples_per_second": 1423.818,
+ "eval_steps_per_second": 89.053,
  "step": 34000
  },
  {
- "epoch": 56.60377358490566,
- "grad_norm": 2.467155694961548,
+ "epoch": 37.735849056603776,
+ "grad_norm": 2.189404010772705,
  "learning_rate": 8.9965e-05,
- "loss": 2.7426,
+ "loss": 3.5493,
  "step": 36000
  },
  {
- "epoch": 56.60377358490566,
- "eval_loss": 4.057339191436768,
- "eval_runtime": 0.6431,
- "eval_samples_per_second": 1416.6,
- "eval_steps_per_second": 88.635,
+ "epoch": 37.735849056603776,
+ "eval_loss": 4.585381031036377,
+ "eval_runtime": 0.9726,
+ "eval_samples_per_second": 1413.783,
+ "eval_steps_per_second": 88.426,
  "step": 36000
  },
  {
- "epoch": 59.74842767295598,
- "eval_loss": 4.104013442993164,
- "eval_runtime": 0.6393,
- "eval_samples_per_second": 1424.887,
- "eval_steps_per_second": 89.153,
+ "epoch": 39.83228511530398,
+ "eval_loss": 4.596414566040039,
+ "eval_runtime": 0.971,
+ "eval_samples_per_second": 1416.025,
+ "eval_steps_per_second": 88.566,
  "step": 38000
  },
  {
- "epoch": 62.893081761006286,
- "grad_norm": 2.5848844051361084,
+ "epoch": 41.928721174004195,
+ "grad_norm": 2.1832594871520996,
  "learning_rate": 9.996000000000001e-05,
- "loss": 2.5994,
+ "loss": 3.4327,
  "step": 40000
  },
  {
- "epoch": 62.893081761006286,
- "eval_loss": 4.14555549621582,
- "eval_runtime": 0.6397,
- "eval_samples_per_second": 1424.163,
- "eval_steps_per_second": 89.108,
+ "epoch": 41.928721174004195,
+ "eval_loss": 4.610367774963379,
+ "eval_runtime": 0.9687,
+ "eval_samples_per_second": 1419.473,
+ "eval_steps_per_second": 88.782,
  "step": 40000
  },
  {
- "epoch": 66.0377358490566,
- "eval_loss": 4.226268768310547,
- "eval_runtime": 0.6407,
- "eval_samples_per_second": 1421.964,
- "eval_steps_per_second": 88.97,
+ "epoch": 44.0251572327044,
+ "eval_loss": 4.636893272399902,
+ "eval_runtime": 0.9651,
+ "eval_samples_per_second": 1424.742,
+ "eval_steps_per_second": 89.111,
  "step": 42000
  },
  {
- "epoch": 69.18238993710692,
- "grad_norm": 2.974116325378418,
+ "epoch": 46.121593291404615,
+ "grad_norm": 2.1686387062072754,
  "learning_rate": 9.336333333333334e-05,
- "loss": 2.4407,
+ "loss": 3.3112,
  "step": 44000
  },
  {
- "epoch": 69.18238993710692,
- "eval_loss": 4.292743682861328,
- "eval_runtime": 0.6391,
- "eval_samples_per_second": 1425.506,
- "eval_steps_per_second": 89.192,
+ "epoch": 46.121593291404615,
+ "eval_loss": 4.66969108581543,
+ "eval_runtime": 0.9642,
+ "eval_samples_per_second": 1426.082,
+ "eval_steps_per_second": 89.195,
  "step": 44000
  },
  {
- "epoch": 72.32704402515724,
- "eval_loss": 4.376113414764404,
- "eval_runtime": 0.639,
- "eval_samples_per_second": 1425.622,
- "eval_steps_per_second": 89.199,
+ "epoch": 48.21802935010482,
+ "eval_loss": 4.69525146484375,
+ "eval_runtime": 0.966,
+ "eval_samples_per_second": 1423.408,
+ "eval_steps_per_second": 89.028,
  "step": 46000
  },
  {
- "epoch": 75.47169811320755,
- "grad_norm": 3.167375087738037,
+ "epoch": 50.314465408805034,
+ "grad_norm": 2.3040497303009033,
  "learning_rate": 8.67e-05,
- "loss": 2.2824,
+ "loss": 3.1908,
  "step": 48000
  },
  {
- "epoch": 75.47169811320755,
- "eval_loss": 4.446501731872559,
- "eval_runtime": 0.6391,
- "eval_samples_per_second": 1425.43,
- "eval_steps_per_second": 89.187,
+ "epoch": 50.314465408805034,
+ "eval_loss": 4.727965831756592,
+ "eval_runtime": 0.9829,
+ "eval_samples_per_second": 1398.863,
+ "eval_steps_per_second": 87.493,
  "step": 48000
  },
  {
- "epoch": 78.61635220125787,
- "eval_loss": 4.518404960632324,
- "eval_runtime": 0.6421,
- "eval_samples_per_second": 1418.867,
- "eval_steps_per_second": 88.777,
+ "epoch": 52.41090146750524,
+ "eval_loss": 4.762918472290039,
+ "eval_runtime": 1.0048,
+ "eval_samples_per_second": 1368.397,
+ "eval_steps_per_second": 85.587,
  "step": 50000
  },
  {
- "epoch": 81.76100628930817,
- "grad_norm": 3.280864715576172,
+ "epoch": 54.507337526205454,
+ "grad_norm": 2.491647720336914,
  "learning_rate": 8.003666666666667e-05,
- "loss": 2.1444,
+ "loss": 3.0857,
  "step": 52000
  },
  {
- "epoch": 81.76100628930817,
- "eval_loss": 4.594879150390625,
- "eval_runtime": 0.6404,
- "eval_samples_per_second": 1422.632,
- "eval_steps_per_second": 89.012,
+ "epoch": 54.507337526205454,
+ "eval_loss": 4.792750835418701,
+ "eval_runtime": 0.9654,
+ "eval_samples_per_second": 1424.262,
+ "eval_steps_per_second": 89.081,
  "step": 52000
  }
  ],
  "logging_steps": 4000,
  "max_steps": 100000,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 158,
+ "num_train_epochs": 105,
  "save_steps": 4000,
  "stateful_callbacks": {
  "TrainerControl": {
@@ -325,7 +325,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1.3444135450066944e+16,
+ "total_flos": 1.3445443431727104e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
checkpoint-52000/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6af9de70859d62cbd3b61f71e8a5bc95702dbc6ddb62d2994641a31953e4ea9b
+ oid sha256:0c779122676b54107edc62ae0b9293c062733193c5f82a36a2bc097bca192814
  size 5368