_base_ = [
    '../_base_/models/setr_mla.py',
    '../_base_/datasets/FoodSeg103_768x768.py',
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
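# SETR-MLA decoder on a ViT-Base/16 backbone, fine-tuned on FoodSeg103 at
# 768x768 from ReLeM-pretrained weights (see pretrain_weights below).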
model = dict(
    backbone=dict(
        img_size=768,
        model_name='vit_base_patch16_224',
        pretrain_weights='pretrained_model/VIT_base_224_ReLeM.pth',
        embed_dim=768,
        depth=12,
        num_heads=12,
        pos_embed_interp=True,  # interpolate 224x224 position embeddings to 768x768
        drop_rate=0.,
        mla_channels=256,
        mla_index=(5, 7, 9, 11)  # transformer layers that feed the MLA branches
    ),
    decode_head=dict(
        img_size=768,
        mla_channels=256,
        mlahead_channels=128,
        num_classes=104),  # 103 FoodSeg103 categories + background
    # One auxiliary head per MLA branch (in_index 0-3), each supervised with
    # cross-entropy loss weighted 0.4.
    auxiliary_head=[
        dict(
            type='VIT_MLA_AUXIHead',
            in_channels=256,
            channels=512,
            in_index=0,
            img_size=768,
            num_classes=104,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='VIT_MLA_AUXIHead',
            in_channels=256,
            channels=512,
            in_index=1,
            img_size=768,
            num_classes=104,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='VIT_MLA_AUXIHead',
            in_channels=256,
            channels=512,
            in_index=2,
            img_size=768,
            num_classes=104,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='VIT_MLA_AUXIHead',
            in_channels=256,
            channels=512,
            in_index=3,
            img_size=768,
            num_classes=104,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    ])
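# Optimizer overrides on top of the base 80k schedule: parameters whose names
# match 'head' (the decode and auxiliary heads) train with a 10x learning-rate
# multiplier relative to the backbone.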
optimizer = dict(
    lr=0.002,
    weight_decay=0.0,
    paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)}))
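# Sliding-window inference: 768x768 crops with a 512-pixel stride.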
crop_size = (768, 768)
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(512, 512))
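# Needed for distributed training when some parameters do not receive
# gradients on every step (e.g. with multiple heads).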
find_unused_parameters = True
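# One image per GPU; the effective batch size scales with the number of GPUs.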
data = dict(samples_per_gpu=1)
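# Typical launch, assuming the standard mmsegmentation-style tools/ layout
# (the config path is illustrative, not part of this file):
#   python tools/train.py <path/to/this/config>.py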