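# FCENet text detection config: ResNet-50 + FPN backbone trained on the
# TotalText dataset. Model, dataset, runtime and schedule defaults are
# inherited from the _base_ configs listed below.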
_base_ = [
    '_base_fcenet_resnet50_fpn.py',
    '../_base_/datasets/totaltext.py',
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_sgd_base.py',
]
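
# Track the checkpoint with the best 'icdar/hmean' (higher is better);
# _delete_=True discards the checkpoint settings inherited from the base
# runtime config.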
default_hooks = dict(
    checkpoint=dict(
        type='CheckpointHook',
        save_best='icdar/hmean',
        rule='greater',
        _delete_=True))
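
# Training pipeline: load images ignoring EXIF orientation, load polygon
# annotations, then apply random resize, crop-flip, crop, rotate, a choice of
# keep-ratio resize + padding or plain resize, horizontal flip and color
# jitter before packing the detection inputs.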
train_pipeline = [
    dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
    dict(
        type='LoadOCRAnnotations',
        with_polygon=True,
        with_bbox=True,
        with_label=True,
    ),
    dict(type='FixInvalidPolygon'),
    dict(
        type='RandomResize',
        scale=(800, 800),
        ratio_range=(0.75, 2.5),
        keep_ratio=True),
    dict(
        type='TextDetRandomCropFlip',
        crop_ratio=0.5,
        iter_num=1,
        min_area_ratio=0.2),
    dict(
        type='RandomApply',
        transforms=[dict(type='RandomCrop', min_side_ratio=0.3)],
        prob=0.8),
    dict(
        type='RandomApply',
        transforms=[
            dict(
                type='RandomRotate',
                max_angle=30,
                pad_with_fixed_color=False,
                use_canvas=True)
        ],
        prob=0.5),
    dict(
        type='RandomChoice',
        transforms=[[
            dict(type='Resize', scale=800, keep_ratio=True),
            dict(type='SourceImagePad', target_scale=800)
        ],
                    dict(type='Resize', scale=800, keep_ratio=False)],
        prob=[0.6, 0.4]),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(
        type='TorchVisionWrapper',
        op='ColorJitter',
        brightness=32.0 / 255,
        saturation=0.5,
        contrast=0.5),
    dict(
        type='PackTextDetInputs',
        meta_keys=('img_path', 'ori_shape', 'img_shape', 'scale_factor'))
]
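
# Test pipeline: resize to fit within 1280x960 while keeping the aspect
# ratio, then load the ground-truth annotations (the targets themselves do
# not need to be resized) and pack the inputs.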
test_pipeline = [
    dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
    dict(type='Resize', scale=(1280, 960), keep_ratio=True),
    # add loading annotation after ``Resize`` because ground truth
    # does not need to do resize data transform
    dict(
        type='LoadOCRAnnotations',
        with_polygon=True,
        with_bbox=True,
        with_label=True),
    dict(type='FixInvalidPolygon'),
    dict(
        type='PackTextDetInputs',
        meta_keys=('img_path', 'ori_shape', 'img_shape', 'scale_factor'))
]
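
# Override the learning rate and weight decay of the SGD optimizer defined in
# the base schedule, and train for 1500 epochs.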
optim_wrapper = dict(optimizer=dict(lr=1e-3, weight_decay=5e-4))
train_cfg = dict(max_epochs=1500)
# learning policy: multiply the lr by 0.8 every 200 epochs until epoch 1200
param_scheduler = [
    dict(type='StepLR', gamma=0.8, step_size=200, end=1200),
]
# dataset settings
totaltext_textdet_train = _base_.totaltext_textdet_train
totaltext_textdet_test = _base_.totaltext_textdet_test
totaltext_textdet_train.pipeline = train_pipeline
totaltext_textdet_test.pipeline = test_pipeline
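
# Dataloaders: batch size 16 for training, single-image batches for
# validation; the test loader reuses the validation loader.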
train_dataloader = dict(
    batch_size=16,
    num_workers=16,
    persistent_workers=True,
    pin_memory=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=totaltext_textdet_train)
val_dataloader = dict(
    batch_size=1,
    num_workers=1,
    persistent_workers=True,
    pin_memory=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=totaltext_textdet_test)
test_dataloader = val_dataloader
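
# Reference batch size used when automatic learning-rate scaling is enabled.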
auto_scale_lr = dict(base_batch_size=16)
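
# Allow distributed (DDP) training even though some parameters may not
# receive gradients in every iteration.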
find_unused_parameters = True