_base_ = [
    '../_base_/datasets/ctw1500.py',
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_adam_600e.py',
    '_base_panet_resnet18_fpem-ffm.py',
]

# Override the shrink ratios used by the PANet module loss when generating
# text-region and kernel targets.
model = dict(det_head=dict(module_loss=dict(shrink_ratio=(1, 0.7))))

# Save a checkpoint every 20 epochs.
default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=20))

train_pipeline = [
    dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
    dict(
        type='LoadOCRAnnotations',
        with_polygon=True,
        with_bbox=True,
        with_label=True,
    ),
    dict(type='ShortScaleAspectJitter', short_size=640, scale_divisor=32),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='RandomRotate', max_angle=10),
    dict(type='TextDetRandomCrop', target_size=(640, 640)),
    dict(type='Pad', size=(640, 640)),
    dict(
        type='TorchVisionWrapper',
        op='ColorJitter',
        brightness=32.0 / 255,
        saturation=0.5),
    dict(
        type='PackTextDetInputs',
        meta_keys=('img_path', 'ori_shape', 'img_shape', 'scale_factor'))
]

test_pipeline = [
    dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
    # TODO Replace with mmcv.RescaleToShort when it's ready
    dict(
        type='ShortScaleAspectJitter',
        short_size=640,
        scale_divisor=1,
        ratio_range=(1.0, 1.0),
        aspect_ratio_range=(1.0, 1.0)),
    dict(
        type='LoadOCRAnnotations',
        with_polygon=True,
        with_bbox=True,
        with_label=True),
    dict(
        type='PackTextDetInputs',
        meta_keys=('img_path', 'ori_shape', 'img_shape', 'scale_factor'))
]

# dataset settings
ctw1500_textdet_train = _base_.ctw1500_textdet_train
ctw1500_textdet_test = _base_.ctw1500_textdet_test
# pipeline settings
ctw1500_textdet_train.pipeline = train_pipeline
ctw1500_textdet_test.pipeline = test_pipeline

train_dataloader = dict(
    batch_size=16,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=ctw1500_textdet_train)

val_dataloader = dict(
    batch_size=1,
    num_workers=4,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=ctw1500_textdet_test)

test_dataloader = val_dataloader

val_evaluator = dict(
    type='HmeanIOUMetric', pred_score_thrs=dict(start=0.3, stop=1, step=0.05))
test_evaluator = val_evaluator
auto_scale_lr = dict(base_batch_size=16)
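
# Example usage sketch, assuming the standard MMOCR repository layout and that
# this config lives under configs/textdet/panet/ (the exact path is an
# assumption, not stated in this file):
#   python tools/train.py configs/textdet/panet/panet_resnet18_fpem-ffm_600e_ctw1500.py
#   python tools/test.py configs/textdet/panet/panet_resnet18_fpem-ffm_600e_ctw1500.py path/to/checkpoint.pth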