Global:
  device: gpu
  epoch_num: 20
  log_smooth_window: 20
  print_batch_step: 10
  output_dir: ./output/rec/u14m_filter/focalsvtr_smtr_maxratio12
  save_epoch_step: 1
  # evaluation is run every 500 iterations
  eval_batch_step: [0, 500]
  eval_epoch_step: [0, 1]
  cal_metric_during_train: True
  pretrained_model: ./output/rec/focalsvtr_smtr/best.pth
  # ./output/focalnet_subs_nocmff_20ep_u14m_k8_max_ratio12_h8_norand1_h2_padrand_doub_96/best.pth
  # ./output/rec/focalsvtr_smtr/best.pth
  checkpoints:
  use_tensorboard: false
  infer_img: ../ltb/img
  # for data or label process
  character_dict_path: &character_dict_path ./tools/utils/EN_symbol_dict.txt # 96en
  # ./tools/utils/ppocr_keys_v1.txt # ch
  max_text_length: &max_text_length 25
  use_space_char: &use_space_char False
  save_res_path: ./output/rec/u14m_filter/predicts_focalsvtr_smtr_maxratio12.txt
  use_amp: True

Optimizer:
  name: AdamW
  lr: 0.00065 # for 4 GPUs, batch size 256 per GPU
  weight_decay: 0.05
  filter_bias_and_bn: True

LRScheduler:
  name: OneCycleLR
  warmup_epoch: 1.5 # pct_start 0.075 * 20 epochs = 1.5 epochs
  cycle_momentum: False

Architecture:
  model_type: rec
  algorithm: SMTR
  in_channels: 3
  Transform:
  Encoder:
    name: FocalSVTR
    img_size: [32, 128]
    depths: [6, 6, 6]
    embed_dim: 96
    sub_k: [[1, 1], [2, 1], [1, 1]]
    focal_levels: [3, 3, 3]
    last_stage: False
  Decoder:
    name: SMTRDecoder
    num_layer: 1
    ds: True
    max_len: *max_text_length
    next_mode: &next True
    sub_str_len: &subsl 5

Loss:
  name: SMTRLoss

PostProcess:
  name: SMTRLabelDecode
  next_mode: *next
  character_dict_path: *character_dict_path
  use_space_char: *use_space_char

Metric:
  name: RecMetric
  main_indicator: acc
  is_filter: True

Train:
  dataset:
    name: RatioDataSet
    ds_width: True
    padding: &padding True
    padding_rand: True
    padding_doub: True
    data_dir_list: ['../Union14M-L-LMDB-Filtered/filter_train_challenging',
                    '../Union14M-L-LMDB-Filtered/filter_train_hard',
                    '../Union14M-L-LMDB-Filtered/filter_train_medium',
                    '../Union14M-L-LMDB-Filtered/filter_train_normal',
                    '../Union14M-L-LMDB-Filtered/filter_train_easy',
                    ]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - PARSeqAug:
      - SMTRLabelEncode: # Class handling label
          sub_str_len: *subsl
          character_dict_path: *character_dict_path
          use_space_char: *use_space_char
          max_text_length: *max_text_length
      - KeepKeys:
          keep_keys: ['image', 'label', 'label_subs', 'label_next', 'length_subs',
                      'label_subs_pre', 'label_next_pre', 'length_subs_pre',
                      'length'] # dataloader will return list in this order
  sampler:
    name: RatioSampler
    scales: [[128, 32]] # w, h
    # divided_factor: to ensure the width and height dimensions can be divided by the downsampling multiple
    first_bs: &bs 256
    fix_bs: false
    divided_factor: [4, 16] # w, h
    is_training: True
  loader:
    shuffle: True
    batch_size_per_card: *bs
    drop_last: True
    max_ratio: &max_ratio 12
    num_workers: 4

Eval:
  dataset:
    name: RatioDataSet
    ds_width: True
    padding: False
    padding_rand: False
    data_dir_list: [
      '../evaluation/CUTE80',
      '../evaluation/IC13_857',
      '../evaluation/IC15_1811',
      '../evaluation/IIIT5k',
      '../evaluation/SVT',
      '../evaluation/SVTP',
      ]
    transforms:
      - DecodeImage: # load image
          img_mode: BGR
          channel_first: False
      - ARLabelEncode: # Class handling label
          character_dict_path: *character_dict_path
          use_space_char: *use_space_char
          max_text_length: *max_text_length
      - KeepKeys:
          keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order
  sampler:
    name: RatioSampler
    scales: [[128, 32]] # w, h
    # divided_factor: to ensure the width and height dimensions can be divided by the downsampling multiple
    first_bs: 128
    fix_bs: false
    divided_factor: [4, 16] # w, h
    is_training: False
  loader:
    shuffle: False
    drop_last: False
    max_ratio: *max_ratio
    batch_size_per_card: 128
    num_workers: 4
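
# Note: the Global section defines YAML anchors (&character_dict_path, &max_text_length,
# &use_space_char) that later sections reuse via aliases (*character_dict_path, etc.), so the
# decoder, label encoders, and post-processing all stay in sync with a single definition.
# A minimal sketch of how these anchors resolve when the file is parsed with PyYAML; the
# file path "configs/rec/smtr/focalsvtr_smtr_maxratio12.yml" is assumed for illustration:
#
#   import yaml
#
#   with open("configs/rec/smtr/focalsvtr_smtr_maxratio12.yml") as f:
#       cfg = yaml.safe_load(f)  # aliases are expanded into plain values on load
#
#   # Both resolve to 25, because *max_text_length points at &max_text_length above.
#   assert cfg["Global"]["max_text_length"] == cfg["Architecture"]["Decoder"]["max_len"] == 25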