Global:
  device: gpu
  epoch_num: 20
  log_smooth_window: 20
  print_batch_step: 10
  output_dir: ./output/rec/u14m_filter/svtrv2_tiny_rctc/
  save_epoch_step: 1
  # evaluation is run every 500 iterations (eval_batch_step) and every epoch (eval_epoch_step)
  eval_epoch_step: [0, 1]
  eval_batch_step: [0, 500]
  cal_metric_during_train: True
  pretrained_model:
  checkpoints:
  use_tensorboard: false
  infer_img:
  # for data or label process
  character_dict_path: &character_dict_path ./tools/utils/EN_symbol_dict.txt
  max_text_length: &max_text_length 25
  use_space_char: &use_space_char False
  save_res_path: ./output/rec/u14m_filter/predicts_svtrv2_rctc.txt
  use_amp: True

Optimizer:
  name: AdamW
  lr: 0.00065 # for 4 GPUs with batch size 256 per GPU
  weight_decay: 0.05
  filter_bias_and_bn: True

LRScheduler:
  name: OneCycleLR
  warmup_epoch: 1.5 # pct_start = 0.075, i.e. 0.075 * 20 epochs = 1.5 epochs
  cycle_momentum: False

Architecture:
  model_type: rec
  algorithm: SVTRv2
  Transform:
  Encoder:
    name: SVTRv2LNConvTwo33
    use_pos_embed: False
    dims: [64, 128, 256]
    depths: [3, 6, 3]
    num_heads: [2, 4, 8]
    mixer: [['Conv','Conv','Conv'],['Conv','Conv','Conv','FGlobal','Global','Global'],['Global','Global','Global']]
    local_k: [[5, 5], [5, 5], [-1, -1]]
    sub_k: [[1, 1], [2, 1], [-1, -1]]
    last_stage: false
    feat2d: True
  Decoder:
    name: RCTCDecoder

Loss:
  name: CTCLoss
  zero_infinity: True

PostProcess:
  name: CTCLabelDecode

Metric:
  name: RecMetric
  main_indicator: acc
  is_filter: True

Train:
  dataset:
    name: RatioDataSetTVResize
    ds_width: True
    padding: False
    data_dir_list: [
      '../Union14M-L-LMDB-Filtered/filter_train_challenging',
      '../Union14M-L-LMDB-Filtered/filter_train_hard',
      '../Union14M-L-LMDB-Filtered/filter_train_medium',
      '../Union14M-L-LMDB-Filtered/filter_train_normal',
      '../Union14M-L-LMDB-Filtered/filter_train_easy',
    ]
    transforms:
      - DecodeImagePIL: # load image
          img_mode: RGB
      - PARSeqAugPIL:
      - CTCLabelEncode: # Class handling label
          character_dict_path: *character_dict_path
          use_space_char: *use_space_char
          max_text_length: *max_text_length
      - KeepKeys:
          keep_keys: ['image', 'label', 'length']
  sampler:
    name: RatioSampler
    scales: [[128, 32]] # w, h
    # divided_factor: ensures the width and height dimensions can be divided by the downsampling multiple
    first_bs: &bs 256
    fix_bs: false
    divided_factor: [4, 16] # w, h
    is_training: True
  loader:
    shuffle: True
    batch_size_per_card: *bs
    drop_last: True
    max_ratio: 4
    num_workers: 4

Eval:
  dataset:
    name: RatioDataSetTVResize
    ds_width: True
    padding: False
    data_dir_list: [
      '../evaluation/CUTE80',
      '../evaluation/IC13_857',
      '../evaluation/IC15_1811',
      '../evaluation/IIIT5k',
      '../evaluation/SVT',
      '../evaluation/SVTP',
    ]
    transforms:
      - DecodeImagePIL: # load image
          img_mode: RGB
      - CTCLabelEncode: # Class handling label
          character_dict_path: *character_dict_path
          use_space_char: *use_space_char
          max_text_length: *max_text_length
      - KeepKeys:
          keep_keys: ['image', 'label', 'length']
  sampler:
    name: RatioSampler
    scales: [[128, 32]] # w, h
    # divided_factor: ensures the width and height dimensions can be divided by the downsampling multiple
    first_bs: 256
    fix_bs: false
    divided_factor: [4, 16] # w, h
    is_training: False
  loader:
    shuffle: False
    drop_last: False
    batch_size_per_card: *bs
    max_ratio: 4
    num_workers: 4
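
# ---------------------------------------------------------------------------
# Usage sketch, kept as comments so the file remains valid YAML. Assumptions
# (not stated in this config): the file is saved as
# configs/rec/svtrv2/svtrv2_rctc.yml in the OpenOCR repo and tools/train_rec.py
# is the training entry point; the exact script name and flags may differ.
#
#   # single machine, 4 GPUs (matches the "4 GPUs with batch size 256 per GPU" lr comment above)
#   torchrun --nproc_per_node=4 tools/train_rec.py --c configs/rec/svtrv2/svtrv2_rctc.yml
#
# If the global batch size changes, consider rescaling lr (0.00065) accordingly.
# ---------------------------------------------------------------------------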