# OpenOCR-Demo: configs/rec/mgpstr/svtrv2_mgpstr_only_char.yml
Global:
  device: gpu
  epoch_num: 20
  log_smooth_window: 20
  print_batch_step: 10
  output_dir: ./output/rec/u14m_filter/svtrv2_mgpstr_only_char/
  eval_epoch_step: [0, 1]
  eval_batch_step: [0, 500]
  cal_metric_during_train: True
  pretrained_model:
  checkpoints:
  use_tensorboard: false
  infer_img:
  # for data or label process
  character_dict_path: &character_dict_path ./tools/utils/EN_symbol_dict.txt
  max_text_length: &max_text_length 25
  use_space_char: &use_space_char False
  use_amp: True
  save_res_path: ./output/rec/u14m_filter/predicts_svtrv2_mgpstr_only_char.txt
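
# The keys above that carry a "&name" (character_dict_path, max_text_length,
# use_space_char) define YAML anchors; they are referenced later in this file
# via "*name" aliases, so each value only needs to be edited in one place.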

Optimizer:
  name: AdamW
  lr: 0.00065 # 4gpus 256bs/gpu
  weight_decay: 0.05
  filter_bias_and_bn: True

LRScheduler:
  name: OneCycleLR
  warmup_epoch: 1.5 # pct_start 0.075*20 = 1.5ep
  cycle_momentum: False
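
# Note on the optimizer/scheduler above: the base lr of 0.00065 was set for a
# global batch of 4 GPUs x 256 images per GPU (see the inline comment); with a
# different global batch size the usual practice is to rescale the lr roughly
# linearly. warmup_epoch: 1.5 corresponds to OneCycleLR's pct_start
# (1.5 / 20 epochs = 0.075), as the inline comment indicates.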

Architecture:
  model_type: rec
  algorithm: MGPSTR
  Transform:
  Encoder:
    name: SVTRv2LNConvTwo33
    use_pos_embed: False
    out_channels: 256
    dims: [128, 256, 384]
    depths: [6, 6, 6]
    num_heads: [4, 8, 12]
    mixer: [['Conv','Conv','Conv','Conv','Conv','Conv'],['Conv','Conv','FGlobal','Global','Global','Global'],['Global','Global','Global','Global','Global','Global']]
    local_k: [[5, 5], [5, 5], [-1, -1]]
    sub_k: [[1, 1], [2, 1], [-1, -1]]
    last_stage: false
    feat2d: false
  Decoder:
    name: MGPDecoder
    only_char: &only_char True
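
# only_char: True restricts MGP-STR to its character-level prediction head
# (the full MGP-STR decoder also predicts at BPE and WordPiece granularity);
# the same flag is propagated to Loss and PostProcess below via the *only_char
# alias, matching the "only_char" suffix in this config's file name.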

Loss:
  name: MGPLoss
  only_char: *only_char

PostProcess:
  name: MPGLabelDecode
  character_dict_path: *character_dict_path
  use_space_char: *use_space_char
  only_char: *only_char

Metric:
  name: RecMetric
  main_indicator: acc
  is_filter: True
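
# is_filter: True in RecMetric is assumed to apply the standard STR evaluation
# protocol before scoring (lower-casing and dropping non-alphanumeric symbols);
# check the RecMetric implementation in the repo to confirm.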

Train:
  dataset:
    name: RatioDataSetTVResize
    ds_width: True
    padding: false
    data_dir_list: ['../Union14M-L-LMDB-Filtered/filter_filter_train_challenging',
                    '../Union14M-L-LMDB-Filtered/filter_filter_train_hard',
                    '../Union14M-L-LMDB-Filtered/filter_filter_train_medium',
                    '../Union14M-L-LMDB-Filtered/filter_filter_train_normal',
                    '../Union14M-L-LMDB-Filtered/filter_filter_train_easy',
                    ]
    transforms:
      - DecodeImagePIL: # load image
          img_mode: RGB
      - PARSeqAugPIL:
      - MGPLabelEncode: # Class handling label
          character_dict_path: *character_dict_path
          use_space_char: *use_space_char
          max_text_length: *max_text_length
          only_char: *only_char
      - KeepKeys:
          keep_keys: ['image', 'char_label', 'length'] # dataloader will return list in this order
  sampler:
    name: RatioSampler
    scales: [[128, 32]] # w, h
    # divided_factor: ensures the width and height can be divided by the downsampling multiple
    first_bs: &bs 256
    fix_bs: false
    divided_factor: [4, 16] # w, h
    is_training: True
  loader:
    shuffle: True
    batch_size_per_card: *bs
    drop_last: True
    max_ratio: &max_ratio 4
    num_workers: 4
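
# The Train pipeline above batches by aspect ratio: RatioSampler groups images
# with similar width/height ratios (capped at max_ratio: 4) and resizes each
# group to a shape derived from scales [[128, 32]], so padding can stay off.
# Eval below reuses the same batch size and ratio cap through the *bs and
# *max_ratio aliases. The exact bucketing behavior is an assumption based on
# the parameter names; see the RatioSampler implementation for details.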

Eval:
  dataset:
    name: RatioDataSetTVResize
    ds_width: True
    padding: False
    data_dir_list: [
        '../evaluation/CUTE80',
        '../evaluation/IC13_857',
        '../evaluation/IC15_1811',
        '../evaluation/IIIT5k',
        '../evaluation/SVT',
        '../evaluation/SVTP',
        ]
    transforms:
      - DecodeImagePIL: # load image
          img_mode: RGB
      - MGPLabelEncode: # Class handling label
          character_dict_path: *character_dict_path
          use_space_char: *use_space_char
          max_text_length: *max_text_length
          only_char: *only_char
      - KeepKeys:
          keep_keys: ['image', 'char_label', 'length'] # dataloader will return list in this order
  sampler:
    name: RatioSampler
    scales: [[128, 32]] # w, h
    # divided_factor: ensures the width and height can be divided by the downsampling multiple
    first_bs: *bs
    fix_bs: false
    divided_factor: [4, 16] # w, h
    is_training: False
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: *bs
    max_ratio: *max_ratio
    num_workers: 4
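
# A typical way to launch training with this config in OpenOCR-style repos is
# something like:
#   python tools/train_rec.py --c configs/rec/mgpstr/svtrv2_mgpstr_only_char.yml
# The script path and flag are assumptions here; check the OpenOCR README for
# the exact command (multi-GPU runs are usually wrapped with torchrun).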