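# SETR-style configuration for semantic segmentation on FoodSeg103 at 768x768,
# using a ViT-Base/16 backbone and the 80k-iteration schedule from the base
# configs. num_classes=104 presumably corresponds to the 103 FoodSeg103 food
# categories plus a background class.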
_base_ = [
    '../_base_/models/setr_naive_pup.py',
    '../_base_/datasets/FoodSeg103_768x768.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
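# Synchronized BatchNorm for multi-GPU training; this norm_cfg is passed to the
# auxiliary heads defined below.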
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    backbone=dict(
        img_size=768,
        model_name='vit_base_patch16_224',
        embed_dim=768,
        depth=12,
        num_heads=12,
        pos_embed_interp=True,
        align_corners=False,
        num_classes=104,
        drop_rate=0.
    ),
    decode_head=dict(
        img_size=768,
        in_channels=768,
        in_index=11,
        channels=512,
        num_classes=104,
        embed_dim=768,
        align_corners=False,
        num_conv=2,
        upsampling_method='bilinear',
    ),
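    # Three auxiliary heads tap intermediate transformer blocks (in_index 5, 7
    # and 9), each adding a 0.4-weighted cross-entropy loss during training.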
    auxiliary_head=[
        dict(
            type='VisionTransformerUpHead',
            in_channels=768,
            channels=512,
            in_index=5,
            img_size=768,
            embed_dim=768,
            num_classes=104,
            norm_cfg=norm_cfg,
            num_conv=2,
            upsampling_method='bilinear',
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='VisionTransformerUpHead',
            in_channels=768,
            channels=512,
            in_index=7,
            img_size=768,
            embed_dim=768,
            num_classes=104,
            norm_cfg=norm_cfg,
            num_conv=2,
            upsampling_method='bilinear',
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='VisionTransformerUpHead',
            in_channels=768,
            channels=512,
            in_index=9,
            img_size=768,
            embed_dim=768,
            num_classes=104,
            norm_cfg=norm_cfg,
            num_conv=2,
            upsampling_method='bilinear',
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    ])
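# paramwise_cfg applies a 10x learning-rate multiplier to parameters whose names
# match 'head', so the randomly initialized heads train faster than the
# pretrained ViT backbone.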
optimizer = dict(
    lr=0.01,
    weight_decay=0.0,
    paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)}))
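# Test-time inference slides a 768x768 window over the image with a 512-pixel stride.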
crop_size = (768, 768)
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(512, 512))
find_unused_parameters = True
data = dict(samples_per_gpu=1)
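# Training is typically launched with the repository's mmsegmentation-style tooling,
# e.g. `python tools/train.py <path/to/this/config>` (placeholder path, not the
# actual file name of this config).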