_base_ = [
    '_base_mask-rcnn_resnet50_fpn.py',
    '../_base_/datasets/ctw1500.py',
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_sgd_base.py',
]
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.08))
train_cfg = dict(max_epochs=160)
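# These two fields override the base schedule (schedule_sgd_base.py), which
# presumably supplies the SGD optim_wrapper; only the learning rate and the
# epoch budget are changed here.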
# learning policy
param_scheduler = [
    dict(type='LinearLR', end=500, start_factor=0.001, by_epoch=False),
    dict(type='MultiStepLR', milestones=[80, 128], end=160),
]
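# Warm up the learning rate linearly over the first 500 iterations, then decay
# it at epochs 80 and 128 (MultiStepLR is epoch-based by default in MMEngine).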
# dataset settings
ctw1500_textdet_train = _base_.ctw1500_textdet_train
ctw1500_textdet_test = _base_.ctw1500_textdet_test
# test pipeline for CTW1500
ctw_test_pipeline = [
    dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
    dict(type='Resize', scale=(1600, 1600), keep_ratio=True),
    # Load annotations after ``Resize`` because the ground-truth polygons
    # do not need to be resized for evaluation.
    dict(
        type='LoadOCRAnnotations',
        with_polygon=True,
        with_bbox=True,
        with_label=True),
    dict(
        type='PackTextDetInputs',
        meta_keys=('img_path', 'ori_shape', 'img_shape', 'scale_factor'))
]
ctw1500_textdet_train.pipeline = _base_.train_pipeline
ctw1500_textdet_test.pipeline = ctw_test_pipeline
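# Reuse the training pipeline from the base config unchanged; only the test
# pipeline is overridden so CTW1500 images are resized to at most 1600x1600
# with the aspect ratio kept at evaluation time.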
train_dataloader = dict(
    batch_size=8,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=ctw1500_textdet_train)
val_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=ctw1500_textdet_test)
test_dataloader = val_dataloader
auto_scale_lr = dict(base_batch_size=8)
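# base_batch_size records the total batch size the lr above was tuned for; if
# automatic LR scaling is enabled (typically via a --auto-scale-lr training
# flag), the learning rate is scaled linearly against this value.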