Skhaki committed on
Commit 09cac0b
1 Parent(s): 2735865

Add Mask2Former-T low-resolution checkpoint

mask2former-swint-8xb2-512x1024-90k/mask2former-swint-8xb2-512x1024-90k.py ADDED
@@ -0,0 +1,558 @@
auto_scale_lr = dict(base_batch_size=16, enable=False)
backbone_embed_multi = dict(decay_mult=0.0, lr_mult=0.1)
backbone_norm_multi = dict(decay_mult=0.0, lr_mult=0.1)
crop_size = (256, 512)
custom_keys = dict({
    'absolute_pos_embed': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone': dict(decay_mult=1.0, lr_mult=0.1),
    'backbone.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.patch_embed.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.stages.0.blocks.0.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.stages.0.blocks.1.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.stages.0.downsample.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.stages.1.blocks.0.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.stages.1.blocks.1.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.stages.1.downsample.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.stages.2.blocks.0.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.stages.2.blocks.1.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.stages.2.blocks.2.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.stages.2.blocks.3.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.stages.2.blocks.4.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.stages.2.blocks.5.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.stages.2.downsample.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.stages.3.blocks.0.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'backbone.stages.3.blocks.1.norm': dict(decay_mult=0.0, lr_mult=0.1),
    'level_embed': dict(decay_mult=0.0, lr_mult=1.0),
    'query_embed': dict(decay_mult=0.0, lr_mult=1.0),
    'query_feat': dict(decay_mult=0.0, lr_mult=1.0),
    'relative_position_bias_table': dict(decay_mult=0.0, lr_mult=0.1)
})
data_preprocessor = dict(
    bgr_to_rgb=True,
    mean=[123.675, 116.28, 103.53],
    pad_val=0,
    seg_pad_val=255,
    size=(256, 512),
    std=[58.395, 57.12, 57.375],
    test_cfg=dict(size_divisor=32),
    type='SegDataPreProcessor')
data_root = '/dataset/cityscapes/'
dataset_type = 'CityscapesDataset'
default_hooks = dict(
    checkpoint=dict(
        by_epoch=False, interval=5000, save_best='mIoU', type='CheckpointHook'),
    logger=dict(interval=50, log_metric_by_epoch=False, type='LoggerHook'),
    param_scheduler=dict(type='ParamSchedulerHook'),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    timer=dict(type='IterTimerHook'),
    visualization=dict(type='SegVisualizationHook'))
default_scope = 'mmseg'
depths = [2, 2, 6, 2]
embed_multi = dict(decay_mult=0.0, lr_mult=1.0)
env_cfg = dict(
    cudnn_benchmark=True,
    dist_cfg=dict(backend='nccl'),
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
launcher = 'pytorch'
load_from = 'work_dirs/mask2former-swint-8xb2-512x1024-90k/mask2former-swint-8xb2-512x1024-90k_ckpt.pth'
log_level = 'INFO'
log_processor = dict(by_epoch=False)
model = dict(
    backbone=dict(
        attn_drop_rate=0.0,
        depths=[2, 2, 6, 2],
        drop_path_rate=0.3,
        drop_rate=0.0,
        embed_dims=96,
        frozen_stages=-1,
        init_cfg=dict(
            checkpoint='https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_tiny_patch4_window7_224_20220317-1cdeb081.pth',
            type='Pretrained'),
        mlp_ratio=4,
        num_heads=[3, 6, 12, 24],
        out_indices=(0, 1, 2, 3),
        patch_norm=True,
        qk_scale=None,
        qkv_bias=True,
        type='SwinTransformer',
        window_size=7,
        with_cp=False),
    data_preprocessor=dict(
        bgr_to_rgb=True,
        mean=[123.675, 116.28, 103.53],
        pad_val=0,
        seg_pad_val=255,
        size=(256, 512),
        std=[58.395, 57.12, 57.375],
        test_cfg=dict(size_divisor=32),
        type='SegDataPreProcessor'),
    decode_head=dict(
        align_corners=False,
        enforce_decoder_input_project=False,
        feat_channels=256,
        in_channels=[96, 192, 384, 768],
        loss_cls=dict(
            class_weight=[
                1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1,
            ],
            loss_weight=2.0,
            reduction='mean',
            type='mmdet.CrossEntropyLoss',
            use_sigmoid=False),
        loss_dice=dict(
            activate=True,
            eps=1.0,
            loss_weight=5.0,
            naive_dice=True,
            reduction='mean',
            type='mmdet.DiceLoss',
            use_sigmoid=True),
        loss_mask=dict(
            loss_weight=5.0,
            reduction='mean',
            type='mmdet.CrossEntropyLoss',
            use_sigmoid=True),
        num_classes=19,
        num_queries=100,
        num_transformer_feat_level=3,
        out_channels=256,
        pixel_decoder=dict(
            act_cfg=dict(type='ReLU'),
            encoder=dict(
                init_cfg=None,
                layer_cfg=dict(
                    ffn_cfg=dict(
                        act_cfg=dict(inplace=True, type='ReLU'),
                        embed_dims=256,
                        feedforward_channels=1024,
                        ffn_drop=0.0,
                        num_fcs=2),
                    self_attn_cfg=dict(
                        batch_first=True,
                        dropout=0.0,
                        embed_dims=256,
                        im2col_step=64,
                        init_cfg=None,
                        norm_cfg=None,
                        num_heads=8,
                        num_levels=3,
                        num_points=4)),
                num_layers=6),
            init_cfg=None,
            norm_cfg=dict(num_groups=32, type='GN'),
            num_outs=3,
            positional_encoding=dict(normalize=True, num_feats=128),
            type='mmdet.MSDeformAttnPixelDecoder'),
        positional_encoding=dict(normalize=True, num_feats=128),
        strides=[4, 8, 16, 32],
        train_cfg=dict(
            assigner=dict(
                match_costs=[
                    dict(type='mmdet.ClassificationCost', weight=2.0),
                    dict(
                        type='mmdet.CrossEntropyLossCost',
                        use_sigmoid=True,
                        weight=5.0),
                    dict(eps=1.0, pred_act=True, type='mmdet.DiceCost', weight=5.0),
                ],
                type='mmdet.HungarianAssigner'),
            importance_sample_ratio=0.75,
            num_points=12544,
            oversample_ratio=3.0,
            sampler=dict(type='mmdet.MaskPseudoSampler')),
        transformer_decoder=dict(
            init_cfg=None,
            layer_cfg=dict(
                cross_attn_cfg=dict(
                    attn_drop=0.0,
                    batch_first=True,
                    dropout_layer=None,
                    embed_dims=256,
                    num_heads=8,
                    proj_drop=0.0),
                ffn_cfg=dict(
                    act_cfg=dict(inplace=True, type='ReLU'),
                    add_identity=True,
                    dropout_layer=None,
                    embed_dims=256,
                    feedforward_channels=2048,
                    ffn_drop=0.0,
                    num_fcs=2),
                self_attn_cfg=dict(
                    attn_drop=0.0,
                    batch_first=True,
                    dropout_layer=None,
                    embed_dims=256,
                    num_heads=8,
                    proj_drop=0.0)),
            num_layers=9,
            return_intermediate=True),
        type='Mask2FormerHead'),
    test_cfg=dict(mode='whole'),
    train_cfg=dict(),
    type='EncoderDecoder')
num_classes = 19
optim_wrapper = dict(
    clip_grad=dict(max_norm=0.01, norm_type=2),
    optimizer=dict(
        betas=(0.9, 0.999), eps=1e-08, lr=0.0001, type='AdamW', weight_decay=0.05),
    paramwise_cfg=dict(
        custom_keys=dict({
            'absolute_pos_embed': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone': dict(decay_mult=1.0, lr_mult=0.1),
            'backbone.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.patch_embed.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.stages.0.blocks.0.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.stages.0.blocks.1.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.stages.0.downsample.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.stages.1.blocks.0.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.stages.1.blocks.1.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.stages.1.downsample.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.stages.2.blocks.0.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.stages.2.blocks.1.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.stages.2.blocks.2.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.stages.2.blocks.3.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.stages.2.blocks.4.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.stages.2.blocks.5.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.stages.2.downsample.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.stages.3.blocks.0.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'backbone.stages.3.blocks.1.norm': dict(decay_mult=0.0, lr_mult=0.1),
            'level_embed': dict(decay_mult=0.0, lr_mult=1.0),
            'query_embed': dict(decay_mult=0.0, lr_mult=1.0),
            'query_feat': dict(decay_mult=0.0, lr_mult=1.0),
            'relative_position_bias_table': dict(decay_mult=0.0, lr_mult=0.1)
        }),
        norm_decay_mult=0.0),
    type='OptimWrapper')
optimizer = dict(
    betas=(0.9, 0.999), eps=1e-08, lr=0.0001, type='AdamW', weight_decay=0.05)
param_scheduler = [
    dict(
        begin=0,
        by_epoch=False,
        end=90000,
        eta_min=0,
        power=0.9,
        type='PolyLR'),
]
pretrained = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_tiny_patch4_window7_224_20220317-1cdeb081.pth'
resume = False
test_cfg = dict(type='TestLoop')
test_dataloader = dict(
    batch_size=1,
    dataset=dict(
        data_prefix=dict(
            img_path='leftImg8bit/val', seg_map_path='gtFine/val'),
        data_root='/dataset/cityscapes/',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(keep_ratio=True, scale=(2048, 1024), type='Resize'),
            dict(type='LoadAnnotations'),
            dict(type='PackSegInputs'),
        ],
        type='CityscapesDataset'),
    num_workers=4,
    persistent_workers=True,
    sampler=dict(shuffle=False, type='DefaultSampler'))
test_evaluator = dict(iou_metrics=['mIoU'], type='IoUMetric')
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(keep_ratio=True, scale=(2048, 1024), type='Resize'),
    dict(type='LoadAnnotations'),
    dict(type='PackSegInputs'),
]
train_cfg = dict(max_iters=90000, type='IterBasedTrainLoop', val_interval=5000)
train_dataloader = dict(
    batch_size=2,
    dataset=dict(
        data_prefix=dict(
            img_path='leftImg8bit/train', seg_map_path='gtFine/train'),
        data_root='/dataset/cityscapes/',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations'),
            dict(
                max_size=4096,
                resize_type='ResizeShortestEdge',
                scales=[
                    512, 614, 716, 819, 921, 1024, 1126, 1228,
                    1331, 1433, 1536, 1638, 1740, 1843, 1945, 2048,
                ],
                type='RandomChoiceResize'),
            dict(cat_max_ratio=0.75, crop_size=(256, 512), type='RandomCrop'),
            dict(prob=0.5, type='RandomFlip'),
            dict(type='PhotoMetricDistortion'),
            dict(type='PackSegInputs'),
        ],
        type='CityscapesDataset'),
    num_workers=2,
    persistent_workers=True,
    sampler=dict(shuffle=True, type='InfiniteSampler'))
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(
        max_size=4096,
        resize_type='ResizeShortestEdge',
        scales=[
            512, 614, 716, 819, 921, 1024, 1126, 1228,
            1331, 1433, 1536, 1638, 1740, 1843, 1945, 2048,
        ],
        type='RandomChoiceResize'),
    dict(cat_max_ratio=0.75, crop_size=(256, 512), type='RandomCrop'),
    dict(prob=0.5, type='RandomFlip'),
    dict(type='PhotoMetricDistortion'),
    dict(type='PackSegInputs'),
]
tta_model = dict(type='SegTTAModel')
tta_pipeline = [
    dict(backend_args=None, type='LoadImageFromFile'),
    dict(
        transforms=[
            [
                dict(keep_ratio=True, scale_factor=0.5, type='Resize'),
                dict(keep_ratio=True, scale_factor=0.75, type='Resize'),
                dict(keep_ratio=True, scale_factor=1.0, type='Resize'),
                dict(keep_ratio=True, scale_factor=1.25, type='Resize'),
                dict(keep_ratio=True, scale_factor=1.5, type='Resize'),
                dict(keep_ratio=True, scale_factor=1.75, type='Resize'),
            ],
            [
                dict(direction='horizontal', prob=0.0, type='RandomFlip'),
                dict(direction='horizontal', prob=1.0, type='RandomFlip'),
            ],
            [dict(type='LoadAnnotations')],
            [dict(type='PackSegInputs')],
        ],
        type='TestTimeAug'),
]
val_cfg = dict(type='ValLoop')
val_dataloader = dict(
    batch_size=1,
    dataset=dict(
        data_prefix=dict(
            img_path='leftImg8bit/val', seg_map_path='gtFine/val'),
        data_root='/dataset/cityscapes/',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(keep_ratio=True, scale=(2048, 1024), type='Resize'),
            dict(type='LoadAnnotations'),
            dict(type='PackSegInputs'),
        ],
        type='CityscapesDataset'),
    num_workers=4,
    persistent_workers=True,
    sampler=dict(shuffle=False, type='DefaultSampler'))
val_evaluator = dict(iou_metrics=['mIoU'], type='IoUMetric')
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
    name='visualizer',
    type='SegLocalVisualizer',
    vis_backends=[dict(type='LocalVisBackend')])
work_dir = './work_dirs/mask2former-swint-8xb2-512x1024-90k'
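
For reference, a minimal inference sketch with the MMSegmentation Python API. This is illustrative and not part of the commit: it assumes mmsegmentation and mmdet are installed, that the real checkpoint weights have been fetched via Git LFS (see the note after the pointer file below), and that the demo image path is hypothetical.

from mmseg.apis import inference_model, init_model

# Both paths point at the files added in this commit; the checkpoint must be
# the actual weights, not the Git LFS pointer shown below.
config_file = 'mask2former-swint-8xb2-512x1024-90k/mask2former-swint-8xb2-512x1024-90k.py'
checkpoint_file = 'mask2former-swint-8xb2-512x1024-90k/mask2former-swint-8xb2-512x1024-90k_ckpt.pth'

model = init_model(config_file, checkpoint_file, device='cuda:0')
result = inference_model(model, 'cityscapes_demo.png')  # hypothetical input image
print(result.pred_sem_seg.data.shape)  # per-pixel class ids, shape (1, H, W)
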
mask2former-swint-8xb2-512x1024-90k/mask2former-swint-8xb2-512x1024-90k_ckpt.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6d1f9120df9c9046743ecb3b2b8162588563544c9c27d74761fd3751c38e8d86
size 622958776
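
The .pth entry above is only a Git LFS pointer; the ~623 MB weights are fetched with `git lfs pull` inside a clone of this repository. Once downloaded, the file can be checked against the pointer metadata with a short standard-library sketch (the local path is an assumption about where the clone lives):

import hashlib
from pathlib import Path

# Expected values copied from the LFS pointer file above.
EXPECTED_SHA256 = '6d1f9120df9c9046743ecb3b2b8162588563544c9c27d74761fd3751c38e8d86'
EXPECTED_SIZE = 622958776  # bytes

ckpt = Path('mask2former-swint-8xb2-512x1024-90k/'
            'mask2former-swint-8xb2-512x1024-90k_ckpt.pth')

assert ckpt.stat().st_size == EXPECTED_SIZE, 'size mismatch (still an LFS pointer?)'

sha = hashlib.sha256()
with ckpt.open('rb') as f:
    for chunk in iter(lambda: f.read(1 << 20), b''):
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED_SHA256, 'sha256 mismatch'
print('checkpoint verified')
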