mgh6 committed · Commit 70e91ea · verified · 1 Parent(s): 0b8d33f

Training in progress, epoch 1, checkpoint

last-checkpoint/config.json CHANGED
@@ -1,21 +1,30 @@
 {
-  "_name_or_path": "tattabio/gLM2_650M",
+  "_name_or_path": "facebook/esm2_t33_650M_UR50D",
   "architectures": [
-    "gLM2ForMaskedLM"
+    "EsmForMaskedLM"
   ],
-  "auto_map": {
-    "AutoConfig": "configuration_glm2.gLM2Config",
-    "AutoModel": "modeling_glm2.gLM2Model",
-    "AutoModelForMaskedLM": "modeling_glm2.gLM2ForMaskedLM"
-  },
-  "depth": 33,
-  "dim": 1280,
-  "ffn_dim_multiplier": null,
-  "heads": 20,
-  "model_type": "gLM2",
-  "norm_eps": 1e-05,
-  "swiglu_multiple_of": 256,
+  "attention_probs_dropout_prob": 0.0,
+  "classifier_dropout": null,
+  "emb_layer_norm_before": false,
+  "esmfold_config": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 1280,
+  "initializer_range": 0.02,
+  "intermediate_size": 5120,
+  "is_folding_model": false,
+  "layer_norm_eps": 1e-05,
+  "mask_token_id": 32,
+  "max_position_embeddings": 1026,
+  "model_type": "esm",
+  "num_attention_heads": 20,
+  "num_hidden_layers": 33,
+  "pad_token_id": 1,
+  "position_embedding_type": "rotary",
+  "token_dropout": true,
   "torch_dtype": "float32",
   "transformers_version": "4.49.0",
-  "vocab_size": 37
+  "use_cache": true,
+  "vocab_list": null,
+  "vocab_size": 33
 }
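Note: the new config identifies this checkpoint as a standard ESM-2 masked-LM model (model_type "esm", 33 layers, vocab_size 33), so it can be opened with the stock transformers auto classes instead of the gLM2 remote code. A minimal sketch, assuming the last-checkpoint/ directory from this commit is available locally; the tokenizer is not part of the checkpoint, so loading it from the base model facebook/esm2_t33_650M_UR50D is an assumption:

# Sketch: inspect and load the checkpoint described by this config diff.
# Assumes `last-checkpoint/` has been downloaded from this repo.
from transformers import AutoConfig, AutoModelForMaskedLM, AutoTokenizer

ckpt_dir = "last-checkpoint"

config = AutoConfig.from_pretrained(ckpt_dir)
print(config.model_type, config.num_hidden_layers, config.vocab_size)  # expected: esm 33 33

model = AutoModelForMaskedLM.from_pretrained(ckpt_dir)  # loads model.safetensors
tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t33_650M_UR50D")  # assumption: base-model tokenizer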
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:60a8151bee68255d9064bbfdc2170764612ef8d251c1027fa3d7f12321916dbc
-size 2682482800
+oid sha256:414be13553d235730cd6c247e44cbd6a06aedc9a3e32ed0e9d4ae9d408220e05
+size 2609498088
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:40239a22d776e1fb8b4210a19644be230adf89378f030068f6bdc92cdbebfd01
-size 5365108834
+oid sha256:98f0ac03d2d0d85353d96c13a42f8dd4e6648b6dc703fec691b45435eb8c1437
+size 5208796146
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67e8fb856d223a5af24cc75d2aa8b4de37cdfc3cbf75d495b03ac0cbca8dbef4
+oid sha256:d880ce44acc3bc8f93d20fb478a852664f88c91d3c7c6a2fac143962de832a8b
 size 15006
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c15dd75d8ac97bb6e7d4107e91cba13385d6d6961fc51bf55911773daaa9d375
+oid sha256:bca4dbe650e04bc8012dd3f1938dfb2a637329721abd75c3bd59d28a64007b54
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,466 +1,46 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 5.997206530510894,
+  "epoch": 1.0,
   "eval_steps": 50,
-  "global_step": 1506,
+  "global_step": 129,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.1986467192252778,
-      "grad_norm": 1.6958907842636108,
-      "learning_rate": 9.800796812749005e-05,
-      "loss": 1.6362,
+      "epoch": 0.3904343582235237,
+      "grad_norm": 0.5675944685935974,
+      "learning_rate": 9.609375e-05,
+      "loss": 1.5678,
       "step": 50
     },
     {
-      "epoch": 0.1986467192252778,
-      "eval_loss": 1.5567175149917603,
-      "eval_runtime": 14.8041,
-      "eval_samples_per_second": 57.281,
-      "eval_steps_per_second": 28.641,
+      "epoch": 0.3904343582235237,
+      "eval_loss": 1.53541898727417,
+      "eval_runtime": 11.6265,
+      "eval_samples_per_second": 37.156,
+      "eval_steps_per_second": 18.578,
       "step": 50
     },
     {
-      "epoch": 0.3972934384505556,
-      "grad_norm": 1.6160608530044556,
-      "learning_rate": 9.601593625498009e-05,
-      "loss": 1.5283,
+      "epoch": 0.7808687164470474,
+      "grad_norm": 0.5330150127410889,
+      "learning_rate": 9.21875e-05,
+      "loss": 1.5019,
       "step": 100
     },
     {
-      "epoch": 0.3972934384505556,
-      "eval_loss": 1.5002830028533936,
-      "eval_runtime": 14.7266,
-      "eval_samples_per_second": 57.583,
-      "eval_steps_per_second": 28.792,
+      "epoch": 0.7808687164470474,
+      "eval_loss": 1.4973269701004028,
+      "eval_runtime": 11.5507,
+      "eval_samples_per_second": 37.4,
+      "eval_steps_per_second": 18.7,
       "step": 100
-    },
-    {
-      "epoch": 0.5959401576758334,
-      "grad_norm": 1.500954270362854,
-      "learning_rate": 9.402390438247013e-05,
-      "loss": 1.4825,
-      "step": 150
-    },
-    {
-      "epoch": 0.5959401576758334,
-      "eval_loss": 1.4542008638381958,
-      "eval_runtime": 14.5669,
-      "eval_samples_per_second": 58.214,
-      "eval_steps_per_second": 29.107,
-      "step": 150
-    },
-    {
-      "epoch": 0.7945868769011112,
-      "grad_norm": 0.8321912884712219,
-      "learning_rate": 9.203187250996016e-05,
-      "loss": 1.4431,
-      "step": 200
-    },
-    {
-      "epoch": 0.7945868769011112,
-      "eval_loss": 1.4306951761245728,
-      "eval_runtime": 14.563,
-      "eval_samples_per_second": 58.23,
-      "eval_steps_per_second": 29.115,
-      "step": 200
-    },
-    {
-      "epoch": 0.993233596126389,
-      "grad_norm": 1.2672511339187622,
-      "learning_rate": 9.00398406374502e-05,
-      "loss": 1.4083,
-      "step": 250
-    },
-    {
-      "epoch": 0.993233596126389,
-      "eval_loss": 1.3854182958602905,
-      "eval_runtime": 14.6247,
-      "eval_samples_per_second": 57.984,
-      "eval_steps_per_second": 28.992,
-      "step": 250
-    },
-    {
-      "epoch": 1.1946737848407722,
-      "grad_norm": 1.051540493965149,
-      "learning_rate": 8.804780876494024e-05,
-      "loss": 1.3707,
-      "step": 300
-    },
-    {
-      "epoch": 1.1946737848407722,
-      "eval_loss": 1.3773671388626099,
-      "eval_runtime": 14.6834,
-      "eval_samples_per_second": 57.752,
-      "eval_steps_per_second": 28.876,
-      "step": 300
-    },
-    {
-      "epoch": 1.39332050406605,
-      "grad_norm": 0.9810565710067749,
-      "learning_rate": 8.605577689243029e-05,
-      "loss": 1.331,
-      "step": 350
-    },
-    {
-      "epoch": 1.39332050406605,
-      "eval_loss": 1.33962881565094,
-      "eval_runtime": 14.6332,
-      "eval_samples_per_second": 57.95,
-      "eval_steps_per_second": 28.975,
-      "step": 350
-    },
-    {
-      "epoch": 1.5919672232913278,
-      "grad_norm": 1.0661150217056274,
-      "learning_rate": 8.406374501992032e-05,
-      "loss": 1.314,
-      "step": 400
-    },
-    {
-      "epoch": 1.5919672232913278,
-      "eval_loss": 1.3049601316452026,
-      "eval_runtime": 14.7683,
-      "eval_samples_per_second": 57.42,
-      "eval_steps_per_second": 28.71,
-      "step": 400
-    },
-    {
-      "epoch": 1.7906139425166057,
-      "grad_norm": 1.1233532428741455,
-      "learning_rate": 8.207171314741037e-05,
-      "loss": 1.2799,
-      "step": 450
-    },
-    {
-      "epoch": 1.7906139425166057,
-      "eval_loss": 1.2853434085845947,
-      "eval_runtime": 14.6881,
-      "eval_samples_per_second": 57.734,
-      "eval_steps_per_second": 28.867,
-      "step": 450
-    },
-    {
-      "epoch": 1.9892606617418833,
-      "grad_norm": 0.9072484970092773,
-      "learning_rate": 8.00796812749004e-05,
-      "loss": 1.2684,
-      "step": 500
-    },
-    {
-      "epoch": 1.9892606617418833,
-      "eval_loss": 1.2741819620132446,
-      "eval_runtime": 14.6458,
-      "eval_samples_per_second": 57.901,
-      "eval_steps_per_second": 28.95,
-      "step": 500
-    },
-    {
-      "epoch": 2.190700850456267,
-      "grad_norm": 1.057379961013794,
-      "learning_rate": 7.808764940239044e-05,
-      "loss": 1.241,
-      "step": 550
-    },
-    {
-      "epoch": 2.190700850456267,
-      "eval_loss": 1.25983726978302,
-      "eval_runtime": 14.6655,
-      "eval_samples_per_second": 57.823,
-      "eval_steps_per_second": 28.911,
-      "step": 550
-    },
-    {
-      "epoch": 2.3893475696815445,
-      "grad_norm": 1.0341771841049194,
-      "learning_rate": 7.609561752988048e-05,
-      "loss": 1.2102,
-      "step": 600
-    },
-    {
-      "epoch": 2.3893475696815445,
-      "eval_loss": 1.2425962686538696,
-      "eval_runtime": 14.6716,
-      "eval_samples_per_second": 57.799,
-      "eval_steps_per_second": 28.899,
-      "step": 600
-    },
-    {
-      "epoch": 2.587994288906822,
-      "grad_norm": 0.9421936869621277,
-      "learning_rate": 7.410358565737052e-05,
-      "loss": 1.2018,
-      "step": 650
-    },
-    {
-      "epoch": 2.587994288906822,
-      "eval_loss": 1.2194551229476929,
-      "eval_runtime": 14.6762,
-      "eval_samples_per_second": 57.781,
-      "eval_steps_per_second": 28.89,
-      "step": 650
-    },
-    {
-      "epoch": 2.7866410081321,
-      "grad_norm": 1.0019429922103882,
-      "learning_rate": 7.211155378486057e-05,
-      "loss": 1.1846,
-      "step": 700
-    },
-    {
-      "epoch": 2.7866410081321,
-      "eval_loss": 1.2068556547164917,
-      "eval_runtime": 14.7447,
-      "eval_samples_per_second": 57.512,
-      "eval_steps_per_second": 28.756,
-      "step": 700
-    },
-    {
-      "epoch": 2.985287727357378,
-      "grad_norm": 1.0412020683288574,
-      "learning_rate": 7.01195219123506e-05,
-      "loss": 1.1678,
-      "step": 750
-    },
-    {
-      "epoch": 2.985287727357378,
-      "eval_loss": 1.199570655822754,
-      "eval_runtime": 17.8039,
-      "eval_samples_per_second": 47.63,
-      "eval_steps_per_second": 23.815,
-      "step": 750
-    },
-    {
-      "epoch": 3.186727916071761,
-      "grad_norm": 0.9792215824127197,
-      "learning_rate": 6.812749003984064e-05,
-      "loss": 1.1527,
-      "step": 800
-    },
-    {
-      "epoch": 3.186727916071761,
-      "eval_loss": 1.1857693195343018,
-      "eval_runtime": 14.7604,
-      "eval_samples_per_second": 57.451,
-      "eval_steps_per_second": 28.726,
-      "step": 800
-    },
-    {
-      "epoch": 3.385374635297039,
-      "grad_norm": 0.916307806968689,
-      "learning_rate": 6.613545816733068e-05,
-      "loss": 1.1294,
-      "step": 850
-    },
-    {
-      "epoch": 3.385374635297039,
-      "eval_loss": 1.1673567295074463,
-      "eval_runtime": 32.8585,
-      "eval_samples_per_second": 25.808,
-      "eval_steps_per_second": 12.904,
-      "step": 850
-    },
-    {
-      "epoch": 3.5840213545223167,
-      "grad_norm": 0.9643361568450928,
-      "learning_rate": 6.414342629482072e-05,
-      "loss": 1.1162,
-      "step": 900
-    },
-    {
-      "epoch": 3.5840213545223167,
-      "eval_loss": 1.1727643013000488,
-      "eval_runtime": 14.7175,
-      "eval_samples_per_second": 57.619,
-      "eval_steps_per_second": 28.809,
-      "step": 900
-    },
-    {
-      "epoch": 3.7826680737475944,
-      "grad_norm": 0.9754778146743774,
-      "learning_rate": 6.215139442231077e-05,
-      "loss": 1.1016,
-      "step": 950
-    },
-    {
-      "epoch": 3.7826680737475944,
-      "eval_loss": 1.1499500274658203,
-      "eval_runtime": 14.7384,
-      "eval_samples_per_second": 57.537,
-      "eval_steps_per_second": 28.768,
-      "step": 950
-    },
-    {
-      "epoch": 3.9813147929728725,
-      "grad_norm": 0.9538551568984985,
-      "learning_rate": 6.01593625498008e-05,
-      "loss": 1.0814,
-      "step": 1000
-    },
-    {
-      "epoch": 3.9813147929728725,
-      "eval_loss": 1.1356687545776367,
-      "eval_runtime": 14.7227,
-      "eval_samples_per_second": 57.598,
-      "eval_steps_per_second": 28.799,
-      "step": 1000
-    },
-    {
-      "epoch": 4.182754981687255,
-      "grad_norm": 0.9160069227218628,
-      "learning_rate": 5.816733067729084e-05,
-      "loss": 1.0749,
-      "step": 1050
-    },
-    {
-      "epoch": 4.182754981687255,
-      "eval_loss": 1.1225874423980713,
-      "eval_runtime": 14.7411,
-      "eval_samples_per_second": 57.526,
-      "eval_steps_per_second": 28.763,
-      "step": 1050
-    },
-    {
-      "epoch": 4.381401700912534,
-      "grad_norm": 1.1243151426315308,
-      "learning_rate": 5.6175298804780876e-05,
-      "loss": 1.0462,
-      "step": 1100
-    },
-    {
-      "epoch": 4.381401700912534,
-      "eval_loss": 1.1159089803695679,
-      "eval_runtime": 14.5859,
-      "eval_samples_per_second": 58.138,
-      "eval_steps_per_second": 29.069,
-      "step": 1100
-    },
-    {
-      "epoch": 4.580048420137811,
-      "grad_norm": 1.018583059310913,
-      "learning_rate": 5.418326693227092e-05,
-      "loss": 1.052,
-      "step": 1150
-    },
-    {
-      "epoch": 4.580048420137811,
-      "eval_loss": 1.1180405616760254,
-      "eval_runtime": 14.8148,
-      "eval_samples_per_second": 57.24,
-      "eval_steps_per_second": 28.62,
-      "step": 1150
-    },
-    {
-      "epoch": 4.778695139363089,
-      "grad_norm": 0.9607245922088623,
-      "learning_rate": 5.219123505976096e-05,
-      "loss": 1.0432,
-      "step": 1200
-    },
-    {
-      "epoch": 4.778695139363089,
-      "eval_loss": 1.0965369939804077,
-      "eval_runtime": 14.6889,
-      "eval_samples_per_second": 57.731,
-      "eval_steps_per_second": 28.865,
-      "step": 1200
-    },
-    {
-      "epoch": 4.977341858588367,
-      "grad_norm": 1.06922447681427,
-      "learning_rate": 5.0199203187251e-05,
-      "loss": 1.0289,
-      "step": 1250
-    },
-    {
-      "epoch": 4.977341858588367,
-      "eval_loss": 1.1008275747299194,
-      "eval_runtime": 14.6242,
-      "eval_samples_per_second": 57.986,
-      "eval_steps_per_second": 28.993,
-      "step": 1250
-    },
-    {
-      "epoch": 5.17878204730275,
-      "grad_norm": 1.1285374164581299,
-      "learning_rate": 4.820717131474104e-05,
-      "loss": 1.0088,
-      "step": 1300
-    },
-    {
-      "epoch": 5.17878204730275,
-      "eval_loss": 1.0875197649002075,
-      "eval_runtime": 14.7256,
-      "eval_samples_per_second": 57.587,
-      "eval_steps_per_second": 28.793,
-      "step": 1300
-    },
-    {
-      "epoch": 5.377428766528028,
-      "grad_norm": 0.9647625684738159,
-      "learning_rate": 4.6215139442231074e-05,
-      "loss": 1.0,
-      "step": 1350
-    },
-    {
-      "epoch": 5.377428766528028,
-      "eval_loss": 1.0778887271881104,
-      "eval_runtime": 14.6228,
-      "eval_samples_per_second": 57.992,
-      "eval_steps_per_second": 28.996,
-      "step": 1350
-    },
-    {
-      "epoch": 5.5760754857533055,
-      "grad_norm": 1.074511170387268,
-      "learning_rate": 4.4223107569721116e-05,
-      "loss": 0.9925,
-      "step": 1400
-    },
-    {
-      "epoch": 5.5760754857533055,
-      "eval_loss": 1.0578875541687012,
-      "eval_runtime": 14.6013,
-      "eval_samples_per_second": 58.077,
-      "eval_steps_per_second": 29.038,
-      "step": 1400
-    },
-    {
-      "epoch": 5.774722204978583,
-      "grad_norm": 0.999279797077179,
-      "learning_rate": 4.223107569721116e-05,
-      "loss": 0.9831,
-      "step": 1450
-    },
-    {
-      "epoch": 5.774722204978583,
-      "eval_loss": 1.068110466003418,
-      "eval_runtime": 14.6065,
-      "eval_samples_per_second": 58.056,
-      "eval_steps_per_second": 29.028,
-      "step": 1450
-    },
-    {
-      "epoch": 5.973368924203861,
-      "grad_norm": 1.027130126953125,
-      "learning_rate": 4.02390438247012e-05,
-      "loss": 0.9744,
-      "step": 1500
-    },
-    {
-      "epoch": 5.973368924203861,
-      "eval_loss": 1.0516407489776611,
-      "eval_runtime": 14.5827,
-      "eval_samples_per_second": 58.151,
-      "eval_steps_per_second": 29.076,
-      "step": 1500
     }
   ],
   "logging_steps": 50,
-  "max_steps": 2510,
+  "max_steps": 1280,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 10,
   "save_steps": 500,
@@ -476,7 +56,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.404255336140636e+17,
+  "total_flos": 3.5663179337957376e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d60cd4fa81843b4806dea3364d37d3df9835095733d168d051c0b135e77b91aa
+oid sha256:497104d84493788ba6f9029b34e87263e741d66179363d27985c9e12854dd130
 size 5368
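Because optimizer.pt, scheduler.pt, rng_state.pth and training_args.bin are committed alongside the weights, training can in principle be resumed from this folder with Trainer.train(resume_from_checkpoint=...). A hedged sketch only: build_dataset and build_collator are hypothetical placeholders for the data pipeline of the original run, and the arguments shown simply mirror values visible in trainer_state.json (batch size 2, 10 epochs, logging/eval every 50 steps, save every 500; a 1e-4 base learning rate is consistent with the logged linear schedule, e.g. 1e-4 * (1280 - 50) / 1280 = 9.609375e-05 at step 50):

# Sketch: resume training from the committed checkpoint directory.
# build_dataset() / build_collator() are hypothetical stand-ins for the
# original data pipeline; they are not defined anywhere in this repo.
from transformers import AutoModelForMaskedLM, Trainer, TrainingArguments

model = AutoModelForMaskedLM.from_pretrained("last-checkpoint")

args = TrainingArguments(
    output_dir="out",
    per_device_train_batch_size=2,  # "train_batch_size": 2
    num_train_epochs=10,            # "num_train_epochs": 10
    logging_steps=50,               # "logging_steps": 50
    eval_strategy="steps",
    eval_steps=50,                  # "eval_steps": 50
    save_steps=500,                 # "save_steps": 500
    learning_rate=1e-4,             # assumption: consistent with the logged linear LR schedule
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=build_dataset("train"),  # hypothetical
    eval_dataset=build_dataset("eval"),    # hypothetical
    data_collator=build_collator(),        # hypothetical
)

# Restores optimizer.pt, scheduler.pt and rng_state.pth before continuing.
trainer.train(resume_from_checkpoint="last-checkpoint")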