---
# Training configuration for ABINet (train-super stage): iterative
# vision + language text-recognition model with an alignment head.

global:
  name: train-abinet
  phase: train
  stage: train-super
  workdir: workdir
  seed: null  # no fixed seed; set an integer for reproducible runs

dataset:
  train:
    roots:
      - 'output_tbell_dataset2/'
      # - 'data/training/MJ/MJ_train/'
      # - 'data/training/MJ/MJ_test/'
      # - 'data/training/MJ/MJ_valid/'
      # - 'data/training/ST'
    batch_size: 20
  test:
    roots:
      - 'output_tbell_dataset2/'
    batch_size: 2
  data_aug: true
  multiscales: false
  num_workers: 1

training:
  epochs: 100000
  show_iters: 50   # log training status every N iterations
  eval_iters: 600  # run evaluation every N iterations
  # save_iters: 3000

optimizer:
  type: AdamW
  true_wd: false
  wd: 0.0
  bn_wd: false
  clip_grad: 20
  lr: 0.0001
  args:
    # NOTE(review): the !!python/tuple tag requires a non-safe loader
    # (e.g. yaml.load with FullLoader/UnsafeLoader); kept as-is because the
    # consumer may expect a tuple — confirm before switching to a plain list.
    betas: !!python/tuple [0.9, 0.999]  # for default Adam
  scheduler:
    periods: [6, 4]
    gamma: 0.1

model:
  name: 'modules.model_abinet_iter.ABINetIterModel'
  iter_size: 3   # number of vision/language refinement iterations
  ensemble: ''
  use_vision: true
  vision:
    checkpoint: workdir/pretrain-vision-model/best-pretrain-vision-model.pth
    loss_weight: 1.0
    attention: 'position'
    backbone: 'transformer'
    backbone_ln: 3
  language:
    checkpoint: workdir/pretrain-language-model/pretrain-language-model.pth
    num_layers: 4
    loss_weight: 1.0
    detach: true
    use_self_attn: false
  alignment:
    loss_weight: 1.0