Global:
  device: gpu
  epoch_num: 20
  log_smooth_window: 20
  print_batch_step: 10
  output_dir: ./output/rec/u14m_filter/focalsvtr_smtr_long
  save_epoch_step: 1
  # evaluation is run every 500 iterations
  eval_batch_step: [0, 500]
  eval_epoch_step: [0, 1]
  cal_metric_during_train: True
  pretrained_model:
  checkpoints:
  use_tensorboard: false
  infer_img: ../ltb/img
  # for data or label process
  character_dict_path: &character_dict_path ./tools/utils/EN_symbol_dict.txt # 96en
  # ./tools/utils/ppocr_keys_v1.txt # ch
  max_text_length: &max_text_length 200
  use_space_char: &use_space_char False
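  # `&name` defines a YAML anchor; the same values are reused further down via the
  # matching `*name` aliases, so the dict path, max length, and space handling stay in sync.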
  save_res_path: ./output/rec/u14m_filter/predicts_focalsvtr_smtr_long.txt
  use_amp: True
Optimizer:
  name: AdamW
  lr: 0.00065 # for 4 GPUs with batch size 256 per GPU
  weight_decay: 0.05
  filter_bias_and_bn: True
LRScheduler:
  name: OneCycleLR
  warmup_epoch: 1.5 # pct_start = 1.5 / 20 epochs = 0.075
  cycle_momentum: False
Architecture:
  model_type: rec
  algorithm: SMTR
  in_channels: 3
  Transform:
  Encoder:
    name: FocalSVTR
    img_size: [32, 128]
    depths: [6, 6, 6]
    embed_dim: 96
    sub_k: [[1, 1], [2, 1], [1, 1]]
    focal_levels: [3, 3, 3]
    last_stage: False
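    # Assumption: last_stage: False keeps the encoder's 2-D feature map for the
    # SMTR decoder instead of pooling it into the 1-D sequence used by CTC-style heads.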
  Decoder:
    name: SMTRDecoder
    num_layer: 1
    ds: True
    max_len: *max_text_length
    next_mode: &next True
    sub_str_len: &subsl 5
    infer_aug: True
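    # The &next and &subsl anchors above are referenced again below, via *next in
    # PostProcess and *subsl in SMTRLabelEncode, so decoding and label encoding stay consistent.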
Loss:
  name: SMTRLoss
PostProcess:
  name: SMTRLabelDecode
  next_mode: *next
  character_dict_path: *character_dict_path
  use_space_char: *use_space_char
Metric:
  name: RecMetric
  main_indicator: acc
  is_filter: True
Train:
  dataset:
    name: RatioDataSet
    ds_width: True
    padding: &padding True
    padding_rand: True
    padding_doub: True
    data_dir_list: ['../Union14M-L-LMDB-Filtered/filter_train_challenging',
                    '../Union14M-L-LMDB-Filtered/filter_train_hard',
                    '../Union14M-L-LMDB-Filtered/filter_train_medium',
                    '../Union14M-L-LMDB-Filtered/filter_train_normal',
                    '../Union14M-L-LMDB-Filtered/filter_train_easy',
                    ]
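    # The five LMDB directories are the difficulty splits of Union14M-L-Filtered;
    # with ds_width: True, RatioDataSet presumably groups samples by image aspect ratio
    # so each batch shares a similar width (see max_ratio under the loader below).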
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - PARSeqAug:
      - SMTRLabelEncode: # Class handling label
          sub_str_len: *subsl
          character_dict_path: *character_dict_path
          use_space_char: *use_space_char
          max_text_length: *max_text_length
      - KeepKeys:
          keep_keys: ['image', 'label', 'label_subs', 'label_next', 'length_subs',
                      'label_subs_pre', 'label_next_pre', 'length_subs_pre', 'length'] # dataloader will return list in this order
  sampler:
    name: RatioSampler
    scales: [[128, 32]] # w, h
    # divided_factor: ensures the width and height can be divided by the downsampling multiple
    first_bs: &bs 256
    fix_bs: false
    divided_factor: [4, 16] # w, h
    is_training: True
  loader:
    shuffle: True
    batch_size_per_card: *bs
    drop_last: True
    max_ratio: &max_ratio 12
    num_workers: 4
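    # Assumption: max_ratio 12 caps the width:height ratio of a ratio bucket, so with
    # a 32 px target height the widest training batch is about 12 * 32 = 384 px.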
Eval:
  dataset:
    name: SimpleDataSet
    data_dir: ../ltb/
    label_file_list: ['../ltb/ultra_long_70_list.txt']
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - ARLabelEncode: # Class handling label
          max_text_length: 200
      - SliceResize:
          image_shape: [3, 32, 128]
          padding: False
          max_ratio: 12
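      # Assumption: SliceResize cuts ultra-long text images into 32x128 slices
      # (up to max_ratio) rather than squashing them to one fixed width, which is
      # why the eval loader below runs with batch_size_per_card: 1.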
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: 1
    num_workers: 2
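As a quick sanity check, the anchor/alias wiring and the warm-up fraction can be verified after loading the file with PyYAML. The snippet below is a minimal sketch, not the trainer's own code: the config file name is an assumption, and the pct_start arithmetic simply mirrors the comment in LRScheduler.

```python
# Minimal sketch: verify anchor/alias resolution and the warm-up fraction.
# CONFIG_PATH is an assumption; point it at wherever this config is saved.
import yaml

CONFIG_PATH = "focalsvtr_smtr_long.yml"  # assumed file name

with open(CONFIG_PATH, "r", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

g = cfg["Global"]

# Aliases resolve to the anchored values defined in Global and in the Decoder.
assert cfg["Architecture"]["Decoder"]["max_len"] == g["max_text_length"] == 200
assert cfg["PostProcess"]["character_dict_path"] == g["character_dict_path"]
assert cfg["Train"]["loader"]["batch_size_per_card"] == cfg["Train"]["sampler"]["first_bs"] == 256

# Warm-up fraction implied by the LRScheduler comment: 1.5 of 20 epochs.
pct_start = cfg["LRScheduler"]["warmup_epoch"] / g["epoch_num"]
print(f"pct_start = {pct_start:.3f}")  # 0.075
```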